1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_rss.h"
40 #endif
41
42 #include "ixgbe.h"
43
44 /************************************************************************
45  * Driver version
46  ************************************************************************/
47 char ixgbe_driver_version[] = "3.2.12-k";
48
49
50 /************************************************************************
51  * PCI Device ID Table
52  *
53  *   Used by probe to select devices to load on
54  *   Last field stores an index into ixgbe_strings
55  *   Last entry must be all 0s
56  *
57  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  ************************************************************************/
59 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
62         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
63         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
64         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
95         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
96         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
97         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
98         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
99         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
100         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
101         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
102         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
103         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
104         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
105         /* required last entry */
106         {0, 0, 0, 0, 0}
107 };
108
109 /************************************************************************
110  * Table of branding strings
111  ************************************************************************/
112 static char    *ixgbe_strings[] = {
113         "Intel(R) PRO/10GbE PCI-Express Network Driver"
114 };
115
116 /************************************************************************
117  * Function prototypes
118  ************************************************************************/
119 static int      ixgbe_probe(device_t);
120 static int      ixgbe_attach(device_t);
121 static int      ixgbe_detach(device_t);
122 static int      ixgbe_shutdown(device_t);
123 static int      ixgbe_suspend(device_t);
124 static int      ixgbe_resume(device_t);
125 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126 static void     ixgbe_init(void *);
127 static void     ixgbe_init_locked(struct adapter *);
128 static void     ixgbe_stop(void *);
129 #if __FreeBSD_version >= 1100036
130 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
131 #endif
132 static void     ixgbe_init_device_features(struct adapter *);
133 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
134 static void     ixgbe_add_media_types(struct adapter *);
135 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136 static int      ixgbe_media_change(struct ifnet *);
137 static int      ixgbe_allocate_pci_resources(struct adapter *);
138 static void     ixgbe_get_slot_info(struct adapter *);
139 static int      ixgbe_allocate_msix(struct adapter *);
140 static int      ixgbe_allocate_legacy(struct adapter *);
141 static int      ixgbe_configure_interrupts(struct adapter *);
142 static void     ixgbe_free_pci_resources(struct adapter *);
143 static void     ixgbe_local_timer(void *);
144 static int      ixgbe_setup_interface(device_t, struct adapter *);
145 static void     ixgbe_config_gpie(struct adapter *);
146 static void     ixgbe_config_dmac(struct adapter *);
147 static void     ixgbe_config_delay_values(struct adapter *);
148 static void     ixgbe_config_link(struct adapter *);
149 static void     ixgbe_check_wol_support(struct adapter *);
150 static int      ixgbe_setup_low_power_mode(struct adapter *);
151 static void     ixgbe_rearm_queues(struct adapter *, u64);
152
153 static void     ixgbe_initialize_transmit_units(struct adapter *);
154 static void     ixgbe_initialize_receive_units(struct adapter *);
155 static void     ixgbe_enable_rx_drop(struct adapter *);
156 static void     ixgbe_disable_rx_drop(struct adapter *);
157 static void     ixgbe_initialize_rss_mapping(struct adapter *);
158
159 static void     ixgbe_enable_intr(struct adapter *);
160 static void     ixgbe_disable_intr(struct adapter *);
161 static void     ixgbe_update_stats_counters(struct adapter *);
162 static void     ixgbe_set_promisc(struct adapter *);
163 static void     ixgbe_set_multi(struct adapter *);
164 static void     ixgbe_update_link_status(struct adapter *);
165 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166 static void     ixgbe_configure_ivars(struct adapter *);
167 static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
168
169 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
170 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
171 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
172
173 static void     ixgbe_add_device_sysctls(struct adapter *);
174 static void     ixgbe_add_hw_stats(struct adapter *);
175 static int      ixgbe_set_flowcntl(struct adapter *, int);
176 static int      ixgbe_set_advertise(struct adapter *, int);
177 static int      ixgbe_get_advertise(struct adapter *);
178
179 /* Sysctl handlers */
180 static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
181                                        const char *, int *, int);
182 static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183 static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
188 #ifdef IXGBE_DEBUG
189 static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190 static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
191 #endif
192 static int      ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193 static int      ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194 static int      ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195 static int      ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196 static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
199
200 /* Support for pluggable optic modules */
201 static bool     ixgbe_sfp_probe(struct adapter *);
202
203 /* Legacy (single vector) interrupt handler */
204 static void     ixgbe_legacy_irq(void *);
205
206 /* The MSI/MSI-X Interrupt handlers */
207 static void     ixgbe_msix_que(void *);
208 static void     ixgbe_msix_link(void *);
209
210 /* Deferred interrupt tasklets */
211 static void     ixgbe_handle_que(void *, int);
212 static void     ixgbe_handle_link(void *, int);
213 static void     ixgbe_handle_msf(void *, int);
214 static void     ixgbe_handle_mod(void *, int);
215 static void     ixgbe_handle_phy(void *, int);
216
217
218 /************************************************************************
219  *  FreeBSD Device Interface Entry Points
220  ************************************************************************/
221 static device_method_t ix_methods[] = {
222         /* Device interface */
223         DEVMETHOD(device_probe, ixgbe_probe),
224         DEVMETHOD(device_attach, ixgbe_attach),
225         DEVMETHOD(device_detach, ixgbe_detach),
226         DEVMETHOD(device_shutdown, ixgbe_shutdown),
227         DEVMETHOD(device_suspend, ixgbe_suspend),
228         DEVMETHOD(device_resume, ixgbe_resume),
229 #ifdef PCI_IOV
230         DEVMETHOD(pci_iov_init, ixgbe_init_iov),
231         DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
232         DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
233 #endif /* PCI_IOV */
234         DEVMETHOD_END
235 };
236
237 static driver_t ix_driver = {
238         "ix", ix_methods, sizeof(struct adapter),
239 };
240
241 devclass_t ix_devclass;
242 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
243
244 MODULE_DEPEND(ix, pci, 1, 1, 1);
245 MODULE_DEPEND(ix, ether, 1, 1, 1);
246 #ifdef DEV_NETMAP
247 MODULE_DEPEND(ix, netmap, 1, 1, 1);
248 #endif
249
250 /*
251  * TUNEABLE PARAMETERS:
252  */
253
254 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
255
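/*
 * Illustrative only: the CTLFLAG_RDTUN knobs below live under the hw.ix
 * sysctl node created above and can be set as loader tunables. A minimal
 * /boot/loader.conf sketch (values are examples, not recommendations):
 *
 *   hw.ix.enable_aim="0"
 *   hw.ix.num_queues="4"
 *   hw.ix.rxd="2048"
 *   hw.ix.txd="2048"
 */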
256 /*
257  * AIM: Adaptive Interrupt Moderation,
258  * which means that the interrupt rate
259  * is varied over time based on the
260  * traffic seen on that interrupt vector.
261  */
262 static int ixgbe_enable_aim = TRUE;
263 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
264     "Enable adaptive interrupt moderation");
265
266 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
267 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
268     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
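/*
 * For reference: assuming the driver's usual IXGBE_LOW_LATENCY value of 128
 * (defined in ixgbe.h, not shown here), this default evaluates to
 * 4000000 / 128 = 31250 interrupts per second per vector.
 */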
269
270 /* How many packets rxeof tries to clean at a time */
271 static int ixgbe_rx_process_limit = 256;
272 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
273     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
274
275 /* How many packets txeof tries to clean at a time */
276 static int ixgbe_tx_process_limit = 256;
277 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
278     &ixgbe_tx_process_limit, 0,
279     "Maximum number of sent packets to process at a time, -1 means unlimited");
280
281 /* Flow control setting, default to full */
282 static int ixgbe_flow_control = ixgbe_fc_full;
283 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
284     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
285
286 /* Advertise Speed, default to 0 (auto) */
287 static int ixgbe_advertise_speed = 0;
288 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
289     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
290
291 /*
292  * Smart speed setting, default to on.
293  * This only works as a compile-time option
294  * right now, since it is applied during attach;
295  * set this to 'ixgbe_smart_speed_off' to
296  * disable.
297  */
298 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
299
300 /*
301  * MSI-X should be the default for best performance,
302  * but this allows it to be forced off for testing.
303  */
304 static int ixgbe_enable_msix = 1;
305 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
306     "Enable MSI-X interrupts");
307
308 /*
309  * Number of queues; if set to 0, it is
310  * autoconfigured based on the number of
311  * CPUs, with a maximum of 8. It can be
312  * overridden manually here.
313  */
314 static int ixgbe_num_queues = 0;
315 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
316     "Number of queues to configure, 0 indicates autoconfigure");
317
318 /*
319  * Number of TX descriptors per ring;
320  * set higher than RX, as this seems to be
321  * the better-performing choice.
322  */
323 static int ixgbe_txd = PERFORM_TXD;
324 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
325     "Number of transmit descriptors per queue");
326
327 /* Number of RX descriptors per ring */
328 static int ixgbe_rxd = PERFORM_RXD;
329 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
330     "Number of receive descriptors per queue");
331
332 /*
333  * Setting this to on allows the use of
334  * unsupported SFP+ modules; note that if
335  * you do so, you are on your own :)
336  */
337 static int allow_unsupported_sfp = FALSE;
338 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
339     &allow_unsupported_sfp, 0,
340     "Allow unsupported SFP modules...use at your own risk");
341
342 /*
343  * Not sure if Flow Director is fully baked,
344  * so we'll default to turning it off.
345  */
346 static int ixgbe_enable_fdir = 0;
347 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
348     "Enable Flow Director");
349
350 /* Legacy Transmit (single queue) */
351 static int ixgbe_enable_legacy_tx = 0;
352 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
353     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
354
355 /* Receive-Side Scaling */
356 static int ixgbe_enable_rss = 1;
357 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
358     "Enable Receive-Side Scaling (RSS)");
359
360 /* Keep a running tab on ports for sanity checking */
361 static int ixgbe_total_ports;
362
363 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
364 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
365
366 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
367
368 /************************************************************************
369  * ixgbe_initialize_rss_mapping
370  ************************************************************************/
371 static void
372 ixgbe_initialize_rss_mapping(struct adapter *adapter)
373 {
374         struct ixgbe_hw *hw = &adapter->hw;
375         u32             reta = 0, mrqc, rss_key[10];
376         int             queue_id, table_size, index_mult;
377         int             i, j;
378         u32             rss_hash_config;
379
380         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
381                 /* Fetch the configured RSS key */
382                 rss_getkey((uint8_t *)&rss_key);
383         } else {
384                 /* set up random bits */
385                 arc4rand(&rss_key, sizeof(rss_key), 0);
386         }
387
388         /* Set multiplier for RETA setup and table size based on MAC */
389         index_mult = 0x1;
390         table_size = 128;
391         switch (adapter->hw.mac.type) {
392         case ixgbe_mac_82598EB:
393                 index_mult = 0x11;
394                 break;
395         case ixgbe_mac_X550:
396         case ixgbe_mac_X550EM_x:
397         case ixgbe_mac_X550EM_a:
398                 table_size = 512;
399                 break;
400         default:
401                 break;
402         }
403
404         /* Set up the redirection table */
405         for (i = 0, j = 0; i < table_size; i++, j++) {
406                 if (j == adapter->num_queues)
407                         j = 0;
408
409                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
410                         /*
411                          * Fetch the RSS bucket id for the given indirection
412                          * entry. Cap it at the number of configured buckets
413                          * (which is num_queues.)
414                          */
415                         queue_id = rss_get_indirection_to_bucket(i);
416                         queue_id = queue_id % adapter->num_queues;
417                 } else
418                         queue_id = (j * index_mult);
419
420                 /*
421                  * The low 8 bits are for hash value (n+0);
422                  * The next 8 bits are for hash value (n+1), etc.
423                  */
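                /*
                 * Worked illustration (hypothetical queue IDs): with four
                 * queues, indirection entries 0..3 would get queue_ids
                 * 0, 1, 2 and 3; after four passes through the shift/OR
                 * below, reta == 0x03020100, i.e. entry n occupies byte n
                 * of RETA(0), which is then written out every fourth entry.
                 */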
424                 reta = reta >> 8;
425                 reta = reta | (((uint32_t)queue_id) << 24);
426                 if ((i & 3) == 3) {
427                         if (i < 128)
428                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
429                         else
430                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
431                                     reta);
432                         reta = 0;
433                 }
434         }
435
436         /* Now fill our hash function seeds */
437         for (i = 0; i < 10; i++)
438                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
439
440         /* Perform hash on these packet types */
441         if (adapter->feat_en & IXGBE_FEATURE_RSS)
442                 rss_hash_config = rss_gethashconfig();
443         else {
444                 /*
445                  * Disable UDP - IP fragments aren't currently being handled
446                  * and so we end up with a mix of 2-tuple and 4-tuple
447                  * traffic.
448                  */
449                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
450                                 | RSS_HASHTYPE_RSS_TCP_IPV4
451                                 | RSS_HASHTYPE_RSS_IPV6
452                                 | RSS_HASHTYPE_RSS_TCP_IPV6
453                                 | RSS_HASHTYPE_RSS_IPV6_EX
454                                 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
455         }
456
457         mrqc = IXGBE_MRQC_RSSEN;
458         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
459                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
460         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
461                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
462         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
463                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
464         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
465                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
466         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
467                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
468         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
469                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
470         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
471                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
472         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
473                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
474                     __func__);
475         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
476                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
477         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
478                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
479         mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
480         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
481 } /* ixgbe_initialize_rss_mapping */
482
483 /************************************************************************
484  * ixgbe_initialize_receive_units - Setup receive registers and features.
485  ************************************************************************/
486 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
487
488 static void
489 ixgbe_initialize_receive_units(struct adapter *adapter)
490 {
491         struct rx_ring  *rxr = adapter->rx_rings;
492         struct ixgbe_hw *hw = &adapter->hw;
493         struct ifnet    *ifp = adapter->ifp;
494         int             i, j;
495         u32             bufsz, fctrl, srrctl, rxcsum;
496         u32             hlreg;
497
498         /*
499          * Make sure receives are disabled while
500          * setting up the descriptor ring
501          */
502         ixgbe_disable_rx(hw);
503
504         /* Enable broadcasts */
505         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
506         fctrl |= IXGBE_FCTRL_BAM;
507         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
508                 fctrl |= IXGBE_FCTRL_DPF;
509                 fctrl |= IXGBE_FCTRL_PMCF;
510         }
511         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
512
513         /* Set for Jumbo Frames? */
514         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
515         if (ifp->if_mtu > ETHERMTU)
516                 hlreg |= IXGBE_HLREG0_JUMBOEN;
517         else
518                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
519
520 #ifdef DEV_NETMAP
521         /* CRC stripping is conditional in Netmap */
522         if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
523             (ifp->if_capenable & IFCAP_NETMAP) &&
524             !ix_crcstrip)
525                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
526         else
527 #endif /* DEV_NETMAP */
528                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
529
530         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
531
532         bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
533             IXGBE_SRRCTL_BSIZEPKT_SHIFT;
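        /*
         * Example of the calculation above (assuming the stock
         * IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10): for a standard MTU where
         * rx_mbuf_sz is MCLBYTES (2048), bufsz = (2048 + 1023) >> 10 = 2,
         * i.e. the SRRCTL packet buffer size is expressed in 1 KB units
         * and the roundup keeps sizes that are not a multiple of 1 KB
         * from being rounded down.
         */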
534
535         for (i = 0; i < adapter->num_queues; i++, rxr++) {
536                 u64 rdba = rxr->rxdma.dma_paddr;
537                 j = rxr->me;
538
539                 /* Setup the Base and Length of the Rx Descriptor Ring */
540                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
541                     (rdba & 0x00000000ffffffffULL));
542                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
543                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
544                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
545
546                 /* Set up the SRRCTL register */
547                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
548                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
549                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
550                 srrctl |= bufsz;
551                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
552
553                 /*
554                  * Set DROP_EN iff we have no flow control and >1 queue.
555                  * Note that srrctl was cleared shortly before during reset,
556                  * so we do not need to clear the bit, but do it just in case
557                  * this code is moved elsewhere.
558                  */
559                 if (adapter->num_queues > 1 &&
560                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
561                         srrctl |= IXGBE_SRRCTL_DROP_EN;
562                 } else {
563                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
564                 }
565
566                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
567
568                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
569                 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
570                 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
571
572                 /* Set the driver rx tail address */
573                 rxr->tail =  IXGBE_RDT(rxr->me);
574         }
575
576         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
577                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
578                             | IXGBE_PSRTYPE_UDPHDR
579                             | IXGBE_PSRTYPE_IPV4HDR
580                             | IXGBE_PSRTYPE_IPV6HDR;
581                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
582         }
583
584         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
585
586         ixgbe_initialize_rss_mapping(adapter);
587
588         if (adapter->num_queues > 1) {
589                 /* RSS and RX IPP Checksum are mutually exclusive */
590                 rxcsum |= IXGBE_RXCSUM_PCSD;
591         }
592
593         if (ifp->if_capenable & IFCAP_RXCSUM)
594                 rxcsum |= IXGBE_RXCSUM_PCSD;
595
596         /* This is useful for calculating UDP/IP fragment checksums */
597         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
598                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
599
600         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
601
602         return;
603 } /* ixgbe_initialize_receive_units */
604
605 /************************************************************************
606  * ixgbe_initialize_transmit_units - Enable transmit units.
607  ************************************************************************/
608 static void
609 ixgbe_initialize_transmit_units(struct adapter *adapter)
610 {
611         struct tx_ring  *txr = adapter->tx_rings;
612         struct ixgbe_hw *hw = &adapter->hw;
613
614         /* Setup the Base and Length of the Tx Descriptor Ring */
615         for (int i = 0; i < adapter->num_queues; i++, txr++) {
616                 u64 tdba = txr->txdma.dma_paddr;
617                 u32 txctrl = 0;
618                 int j = txr->me;
619
620                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
621                     (tdba & 0x00000000ffffffffULL));
622                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
623                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
624                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
625
626                 /* Setup the HW Tx Head and Tail descriptor pointers */
627                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
628                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
629
630                 /* Cache the tail address */
631                 txr->tail = IXGBE_TDT(j);
632
633                 /* Disable Head Writeback */
634                 /*
635                  * Note: for X550 series devices, these registers are actually
636                  * prefixed with TPH_ instead of DCA_, but the addresses and
637                  * fields remain the same.
638                  */
639                 switch (hw->mac.type) {
640                 case ixgbe_mac_82598EB:
641                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
642                         break;
643                 default:
644                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
645                         break;
646                 }
647                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
648                 switch (hw->mac.type) {
649                 case ixgbe_mac_82598EB:
650                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
651                         break;
652                 default:
653                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
654                         break;
655                 }
656
657         }
658
659         if (hw->mac.type != ixgbe_mac_82598EB) {
660                 u32 dmatxctl, rttdcs;
661
662                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
663                 dmatxctl |= IXGBE_DMATXCTL_TE;
664                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
665                 /* Disable arbiter to set MTQC */
666                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
667                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
668                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
669                 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
670                     ixgbe_get_mtqc(adapter->iov_mode));
671                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
672                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
673         }
674
675         return;
676 } /* ixgbe_initialize_transmit_units */
677
678 /************************************************************************
679  * ixgbe_attach - Device initialization routine
680  *
681  *   Called when the driver is being loaded.
682  *   Identifies the type of hardware, allocates all resources
683  *   and initializes the hardware.
684  *
685  *   return 0 on success, positive on failure
686  ************************************************************************/
687 static int
688 ixgbe_attach(device_t dev)
689 {
690         struct adapter  *adapter;
691         struct ixgbe_hw *hw;
692         int             error = 0;
693         u32             ctrl_ext;
694
695         INIT_DEBUGOUT("ixgbe_attach: begin");
696
697         /* Allocate, clear, and link in our adapter structure */
698         adapter = device_get_softc(dev);
699         adapter->hw.back = adapter;
700         adapter->dev = dev;
701         hw = &adapter->hw;
702
703         /* Core Lock Init*/
704         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
705
706         /* Set up the timer callout */
707         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
708
709         /* Determine hardware revision */
710         hw->vendor_id = pci_get_vendor(dev);
711         hw->device_id = pci_get_device(dev);
712         hw->revision_id = pci_get_revid(dev);
713         hw->subsystem_vendor_id = pci_get_subvendor(dev);
714         hw->subsystem_device_id = pci_get_subdevice(dev);
715
716         /*
717          * Make sure BUSMASTER is set
718          */
719         pci_enable_busmaster(dev);
720
721         /* Do base PCI setup - map BAR0 */
722         if (ixgbe_allocate_pci_resources(adapter)) {
723                 device_printf(dev, "Allocation of PCI resources failed\n");
724                 error = ENXIO;
725                 goto err_out;
726         }
727
728         /* let hardware know driver is loaded */
729         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
730         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
731         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
732
733         /*
734          * Initialize the shared code
735          */
736         if (ixgbe_init_shared_code(hw)) {
737                 device_printf(dev, "Unable to initialize the shared code\n");
738                 error = ENXIO;
739                 goto err_out;
740         }
741
742         if (hw->mbx.ops.init_params)
743                 hw->mbx.ops.init_params(hw);
744
745         hw->allow_unsupported_sfp = allow_unsupported_sfp;
746
747         /* Pick up the 82599 settings */
748         if (hw->mac.type != ixgbe_mac_82598EB) {
749                 hw->phy.smart_speed = ixgbe_smart_speed;
750                 adapter->num_segs = IXGBE_82599_SCATTER;
751         } else
752                 adapter->num_segs = IXGBE_82598_SCATTER;
753
754         ixgbe_init_device_features(adapter);
755
756         if (ixgbe_configure_interrupts(adapter)) {
757                 error = ENXIO;
758                 goto err_out;
759         }
760
761         /* Allocate multicast array memory. */
762         adapter->mta = malloc(sizeof(*adapter->mta) *
763             MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
764         if (adapter->mta == NULL) {
765                 device_printf(dev, "Can not allocate multicast setup array\n");
766                 error = ENOMEM;
767                 goto err_out;
768         }
769
770         /* Enable WoL (if supported) */
771         ixgbe_check_wol_support(adapter);
772
773         /* Register for VLAN events */
774         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
775             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
776         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
777             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
778
779         /* Verify adapter fan is still functional (if applicable) */
780         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
781                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
782                 ixgbe_check_fan_failure(adapter, esdp, FALSE);
783         }
784
785         /* Ensure SW/FW semaphore is free */
786         ixgbe_init_swfw_semaphore(hw);
787
788         /* Enable EEE power saving */
789         if (adapter->feat_en & IXGBE_FEATURE_EEE)
790                 hw->mac.ops.setup_eee(hw, TRUE);
791
792         /* Set an initial default flow control value */
793         hw->fc.requested_mode = ixgbe_flow_control;
794
795         /* Sysctls for limiting the amount of work done in the taskqueues */
796         ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
797             "max number of rx packets to process",
798             &adapter->rx_process_limit, ixgbe_rx_process_limit);
799
800         ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
801             "max number of tx packets to process",
802             &adapter->tx_process_limit, ixgbe_tx_process_limit);
803
804         /* Do descriptor calc and sanity checks */
805         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
806             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
807                 device_printf(dev, "TXD config issue, using default!\n");
808                 adapter->num_tx_desc = DEFAULT_TXD;
809         } else
810                 adapter->num_tx_desc = ixgbe_txd;
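        /*
         * For reference on the alignment test above: advanced TX descriptors
         * are 16 bytes, so with the driver's usual DBA_ALIGN of 128 (an
         * assumption; see ixgbe.h) the requested ring size is accepted only
         * when ixgbe_txd is a multiple of 8 and within [MIN_TXD, MAX_TXD].
         */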
811
812         /*
813          * With many RX rings it is easy to exceed the
814          * system mbuf allocation. Tuning nmbclusters
815          * can alleviate this.
816          */
817         if (nmbclusters > 0) {
818                 int s;
819                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
820                 if (s > nmbclusters) {
821                         device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
822                         ixgbe_rxd = DEFAULT_RXD;
823                 }
824         }
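        /*
         * Rough example of the check above: 8 queues of 2048 RX descriptors
         * on a single port would want 8 * 2048 = 16384 clusters for its
         * rings alone. If kern.ipc.nmbclusters is tuned below that (e.g. via
         * loader.conf; the exact value is workload dependent), the driver
         * falls back to DEFAULT_RXD rather than exhaust the mbuf pool.
         */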
825
826         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
827             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
828                 device_printf(dev, "RXD config issue, using default!\n");
829                 adapter->num_rx_desc = DEFAULT_RXD;
830         } else
831                 adapter->num_rx_desc = ixgbe_rxd;
832
833         /* Allocate our TX/RX Queues */
834         if (ixgbe_allocate_queues(adapter)) {
835                 error = ENOMEM;
836                 goto err_out;
837         }
838
839         hw->phy.reset_if_overtemp = TRUE;
840         error = ixgbe_reset_hw(hw);
841         hw->phy.reset_if_overtemp = FALSE;
842         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
843                 /*
844                  * No optics in this port; set up
845                  * so the timer routine will probe
846                  * for later insertion.
847                  */
848                 adapter->sfp_probe = TRUE;
849                 error = IXGBE_SUCCESS;
850         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
851                 device_printf(dev, "Unsupported SFP+ module detected!\n");
852                 error = EIO;
853                 goto err_late;
854         } else if (error) {
855                 device_printf(dev, "Hardware initialization failed\n");
856                 error = EIO;
857                 goto err_late;
858         }
859
860         /* Make sure we have a good EEPROM before we read from it */
861         if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
862                 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
863                 error = EIO;
864                 goto err_late;
865         }
866
867         /* Setup OS specific network interface */
868         if (ixgbe_setup_interface(dev, adapter) != 0)
869                 goto err_late;
870
871         if (adapter->feat_en & IXGBE_FEATURE_MSIX)
872                 error = ixgbe_allocate_msix(adapter);
873         else
874                 error = ixgbe_allocate_legacy(adapter);
875         if (error)
876                 goto err_late;
877
878         error = ixgbe_start_hw(hw);
879         switch (error) {
880         case IXGBE_ERR_EEPROM_VERSION:
881                 device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
882                 break;
883         case IXGBE_ERR_SFP_NOT_SUPPORTED:
884                 device_printf(dev, "Unsupported SFP+ Module\n");
885                 error = EIO;
886                 goto err_late;
887         case IXGBE_ERR_SFP_NOT_PRESENT:
888                 device_printf(dev, "No SFP+ Module found\n");
889                 /* falls thru */
890         default:
891                 break;
892         }
893
894         /* Enable the optics for 82599 SFP+ fiber */
895         ixgbe_enable_tx_laser(hw);
896
897         /* Enable power to the phy. */
898         ixgbe_set_phy_power(hw, TRUE);
899
900         /* Initialize statistics */
901         ixgbe_update_stats_counters(adapter);
902
903         /* Check PCIE slot type/speed/width */
904         ixgbe_get_slot_info(adapter);
905
906         /*
907          * Do time init and sysctl init here, but
908          * only on the first port of a bypass adapter.
909          */
910         ixgbe_bypass_init(adapter);
911
912         /* Set an initial dmac value */
913         adapter->dmac = 0;
914         /* Set initial advertised speeds (if applicable) */
915         adapter->advertise = ixgbe_get_advertise(adapter);
916
917         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
918                 ixgbe_define_iov_schemas(dev, &error);
919
920         /* Add sysctls */
921         ixgbe_add_device_sysctls(adapter);
922         ixgbe_add_hw_stats(adapter);
923
924         /* For Netmap */
925         adapter->init_locked = ixgbe_init_locked;
926         adapter->stop_locked = ixgbe_stop;
927
928         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
929                 ixgbe_netmap_attach(adapter);
930
931         INIT_DEBUGOUT("ixgbe_attach: end");
932
933         return (0);
934
935 err_late:
936         ixgbe_free_transmit_structures(adapter);
937         ixgbe_free_receive_structures(adapter);
938         free(adapter->queues, M_DEVBUF);
939 err_out:
940         if (adapter->ifp != NULL)
941                 if_free(adapter->ifp);
942         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
943         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
944         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
945         ixgbe_free_pci_resources(adapter);
946         free(adapter->mta, M_IXGBE);
947         IXGBE_CORE_LOCK_DESTROY(adapter);
948
949         return (error);
950 } /* ixgbe_attach */
951
952 /************************************************************************
953  * ixgbe_check_wol_support
954  *
955  *   Checks whether the adapter's ports are capable of
956  *   Wake On LAN by reading the adapter's NVM.
957  *
958  *   Sets each port's hw->wol_enabled value depending
959  *   on the value read here.
960  ************************************************************************/
961 static void
962 ixgbe_check_wol_support(struct adapter *adapter)
963 {
964         struct ixgbe_hw *hw = &adapter->hw;
965         u16             dev_caps = 0;
966
967         /* Find out WoL support for port */
968         adapter->wol_support = hw->wol_enabled = 0;
969         ixgbe_get_device_caps(hw, &dev_caps);
970         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
971             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
972              hw->bus.func == 0))
973                 adapter->wol_support = hw->wol_enabled = 1;
974
975         /* Save initial wake up filter configuration */
976         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
977
978         return;
979 } /* ixgbe_check_wol_support */
980
981 /************************************************************************
982  * ixgbe_setup_interface
983  *
984  *   Setup networking device structure and register an interface.
985  ************************************************************************/
986 static int
987 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
988 {
989         struct ifnet *ifp;
990
991         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
992
993         ifp = adapter->ifp = if_alloc(IFT_ETHER);
994         if (ifp == NULL) {
995                 device_printf(dev, "can not allocate ifnet structure\n");
996                 return (-1);
997         }
998         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
999         ifp->if_baudrate = IF_Gbps(10);
1000         ifp->if_init = ixgbe_init;
1001         ifp->if_softc = adapter;
1002         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1003         ifp->if_ioctl = ixgbe_ioctl;
1004 #if __FreeBSD_version >= 1100036
1005         if_setgetcounterfn(ifp, ixgbe_get_counter);
1006 #endif
1007 #if __FreeBSD_version >= 1100045
1008         /* TSO parameters */
1009         ifp->if_hw_tsomax = 65518;
1010         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1011         ifp->if_hw_tsomaxsegsize = 2048;
1012 #endif
1013         if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1014                 ifp->if_start = ixgbe_legacy_start;
1015                 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1016                 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
1017                 IFQ_SET_READY(&ifp->if_snd);
1018                 ixgbe_start_locked = ixgbe_legacy_start_locked;
1019                 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
1020         } else {
1021                 ifp->if_transmit = ixgbe_mq_start;
1022                 ifp->if_qflush = ixgbe_qflush;
1023                 ixgbe_start_locked = ixgbe_mq_start_locked;
1024                 ixgbe_ring_empty = drbr_empty;
1025         }
1026
1027         ether_ifattach(ifp, adapter->hw.mac.addr);
1028
1029         adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1030
1031         /*
1032          * Tell the upper layer(s) we support long frames.
1033          */
1034         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1035
1036         /* Set capability flags */
1037         ifp->if_capabilities |= IFCAP_HWCSUM
1038                              |  IFCAP_HWCSUM_IPV6
1039                              |  IFCAP_TSO
1040                              |  IFCAP_LRO
1041                              |  IFCAP_VLAN_HWTAGGING
1042                              |  IFCAP_VLAN_HWTSO
1043                              |  IFCAP_VLAN_HWCSUM
1044                              |  IFCAP_JUMBO_MTU
1045                              |  IFCAP_VLAN_MTU
1046                              |  IFCAP_HWSTATS;
1047
1048         /* Enable the above capabilities by default */
1049         ifp->if_capenable = ifp->if_capabilities;
1050
1051         /*
1052          * Don't turn this on by default: if VLANs are
1053          * created on another pseudo device (e.g. lagg),
1054          * VLAN events are not passed through, which breaks
1055          * operation, whereas with HW FILTER off it works. If
1056          * you use VLANs directly on the ixgbe driver, you can
1057          * enable this and get full hardware tag filtering.
1058          */
1059         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1060
1061         /*
1062          * Specify the media types supported by this adapter and register
1063          * callbacks to update media and link information
1064          */
1065         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1066             ixgbe_media_status);
1067
1068         adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1069         ixgbe_add_media_types(adapter);
1070
1071         /* Set autoselect media by default */
1072         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1073
1074         return (0);
1075 } /* ixgbe_setup_interface */
1076
1077 #if __FreeBSD_version >= 1100036
1078 /************************************************************************
1079  * ixgbe_get_counter
1080  ************************************************************************/
1081 static uint64_t
1082 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
1083 {
1084         struct adapter *adapter;
1085         struct tx_ring *txr;
1086         uint64_t       rv;
1087
1088         adapter = if_getsoftc(ifp);
1089
1090         switch (cnt) {
1091         case IFCOUNTER_IPACKETS:
1092                 return (adapter->ipackets);
1093         case IFCOUNTER_OPACKETS:
1094                 return (adapter->opackets);
1095         case IFCOUNTER_IBYTES:
1096                 return (adapter->ibytes);
1097         case IFCOUNTER_OBYTES:
1098                 return (adapter->obytes);
1099         case IFCOUNTER_IMCASTS:
1100                 return (adapter->imcasts);
1101         case IFCOUNTER_OMCASTS:
1102                 return (adapter->omcasts);
1103         case IFCOUNTER_COLLISIONS:
1104                 return (0);
1105         case IFCOUNTER_IQDROPS:
1106                 return (adapter->iqdrops);
1107         case IFCOUNTER_OQDROPS:
1108                 rv = 0;
1109                 txr = adapter->tx_rings;
1110                 for (int i = 0; i < adapter->num_queues; i++, txr++)
1111                         rv += txr->br->br_drops;
1112                 return (rv);
1113         case IFCOUNTER_IERRORS:
1114                 return (adapter->ierrors);
1115         default:
1116                 return (if_get_counter_default(ifp, cnt));
1117         }
1118 } /* ixgbe_get_counter */
1119 #endif
1120
1121 /************************************************************************
1122  * ixgbe_add_media_types
1123  ************************************************************************/
1124 static void
1125 ixgbe_add_media_types(struct adapter *adapter)
1126 {
1127         struct ixgbe_hw *hw = &adapter->hw;
1128         device_t        dev = adapter->dev;
1129         u64             layer;
1130
1131         layer = adapter->phy_layer;
1132
1133         /* Media types with matching FreeBSD media defines */
1134         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1135                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1136         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1137                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1138         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1139                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1140         if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1141                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1142
1143         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1144             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1145                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1146                     NULL);
1147
1148         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1149                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1150                 if (hw->phy.multispeed_fiber)
1151                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1152                             NULL);
1153         }
1154         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1155                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1156                 if (hw->phy.multispeed_fiber)
1157                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1158                             NULL);
1159         } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1160                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1161         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1162                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1163
1164 #ifdef IFM_ETH_XTYPE
1165         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1166                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1167         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1168                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1169         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1170                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1171         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1172                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1173 #else
1174         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1175                 device_printf(dev, "Media supported: 10GbaseKR\n");
1176                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1177                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1178         }
1179         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1180                 device_printf(dev, "Media supported: 10GbaseKX4\n");
1181                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1182                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1183         }
1184         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1185                 device_printf(dev, "Media supported: 1000baseKX\n");
1186                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1187                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1188         }
1189         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1190                 device_printf(dev, "Media supported: 2500baseKX\n");
1191                 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1192                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1193         }
1194 #endif
1195         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1196                 device_printf(dev, "Media supported: 1000baseBX\n");
1197
1198         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1199                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1200                     0, NULL);
1201                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1202         }
1203
1204         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1205 } /* ixgbe_add_media_types */
1206
1207 /************************************************************************
1208  * ixgbe_is_sfp
1209  ************************************************************************/
1210 static inline bool
1211 ixgbe_is_sfp(struct ixgbe_hw *hw)
1212 {
1213         switch (hw->mac.type) {
1214         case ixgbe_mac_82598EB:
1215                 if (hw->phy.type == ixgbe_phy_nl)
1216                         return TRUE;
1217                 return FALSE;
1218         case ixgbe_mac_82599EB:
1219                 switch (hw->mac.ops.get_media_type(hw)) {
1220                 case ixgbe_media_type_fiber:
1221                 case ixgbe_media_type_fiber_qsfp:
1222                         return TRUE;
1223                 default:
1224                         return FALSE;
1225                 }
1226         case ixgbe_mac_X550EM_x:
1227         case ixgbe_mac_X550EM_a:
1228                 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1229                         return TRUE;
1230                 return FALSE;
1231         default:
1232                 return FALSE;
1233         }
1234 } /* ixgbe_is_sfp */
1235
1236 /************************************************************************
1237  * ixgbe_config_link
1238  ************************************************************************/
1239 static void
1240 ixgbe_config_link(struct adapter *adapter)
1241 {
1242         struct ixgbe_hw *hw = &adapter->hw;
1243         u32             autoneg, err = 0;
1244         bool            sfp, negotiate;
1245
1246         sfp = ixgbe_is_sfp(hw);
1247
1248         if (sfp) {
1249                 if (hw->phy.multispeed_fiber) {
1250                         hw->mac.ops.setup_sfp(hw);
1251                         ixgbe_enable_tx_laser(hw);
1252                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1253                 } else
1254                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1255         } else {
1256                 if (hw->mac.ops.check_link)
1257                         err = ixgbe_check_link(hw, &adapter->link_speed,
1258                             &adapter->link_up, FALSE);
1259                 if (err)
1260                         goto out;
1261                 autoneg = hw->phy.autoneg_advertised;
1262                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1263                         err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1264                             &negotiate);
1265                 if (err)
1266                         goto out;
1267                 if (hw->mac.ops.setup_link)
1268                         err = hw->mac.ops.setup_link(hw, autoneg,
1269                             adapter->link_up);
1270         }
1271 out:
1272
1273         return;
1274 } /* ixgbe_config_link */
1275
1276 /************************************************************************
1277  * ixgbe_update_stats_counters - Update board statistics counters.
1278  ************************************************************************/
1279 static void
1280 ixgbe_update_stats_counters(struct adapter *adapter)
1281 {
1282         struct ixgbe_hw       *hw = &adapter->hw;
1283         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1284         u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1285         u64                   total_missed_rx = 0;
1286
1287         stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1288         stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1289         stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1290         stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1291         stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1292
1293         for (int i = 0; i < 16; i++) {
1294                 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1295                 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1296                 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1297         }
1298         stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1299         stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1300         stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1301
1302         /* Hardware workaround: gprc counts missed packets */
1303         stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1304         stats->gprc -= missed_rx;
1305
1306         if (hw->mac.type != ixgbe_mac_82598EB) {
1307                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1308                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1309                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1310                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1311                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1312                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1313                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1314                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1315         } else {
1316                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1317                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1318                 /* 82598 only has a counter in the high register */
1319                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1320                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1321                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1322         }
1323
1324         /*
1325          * Workaround: mprc hardware is incorrectly counting
1326          * broadcasts, so for now we subtract those.
1327          */
1328         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1329         stats->bprc += bprc;
1330         stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1331         if (hw->mac.type == ixgbe_mac_82598EB)
1332                 stats->mprc -= bprc;
1333
1334         stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1335         stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1336         stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1337         stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1338         stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1339         stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1340
1341         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1342         stats->lxontxc += lxon;
1343         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1344         stats->lxofftxc += lxoff;
1345         total = lxon + lxoff;
1346
1347         stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1348         stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1349         stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1350         stats->gptc -= total;
1351         stats->mptc -= total;
1352         stats->ptc64 -= total;
1353         stats->gotc -= total * ETHER_MIN_LEN;
1354
1355         stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1356         stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1357         stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1358         stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1359         stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1360         stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1361         stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1362         stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1363         stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1364         stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1365         stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1366         stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1367         stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1368         stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1369         stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1370         stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1371         stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1372         stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1373         /* Only read FCOE on 82599 */
1374         if (hw->mac.type != ixgbe_mac_82598EB) {
1375                 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1376                 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1377                 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1378                 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1379                 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1380         }
1381
1382         /* Fill out the OS statistics structure */
1383         IXGBE_SET_IPACKETS(adapter, stats->gprc);
1384         IXGBE_SET_OPACKETS(adapter, stats->gptc);
1385         IXGBE_SET_IBYTES(adapter, stats->gorc);
1386         IXGBE_SET_OBYTES(adapter, stats->gotc);
1387         IXGBE_SET_IMCASTS(adapter, stats->mprc);
1388         IXGBE_SET_OMCASTS(adapter, stats->mptc);
1389         IXGBE_SET_COLLISIONS(adapter, 0);
1390         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1391         IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1392 } /* ixgbe_update_stats_counters */
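/*
 * Example of the pause-frame adjustment above: if 3 XON and 2 XOFF frames
 * were transmitted since the last update, total = 5, so 5 packets are
 * backed out of gptc/mptc/ptc64 and 5 * ETHER_MIN_LEN (5 * 64 = 320) bytes
 * out of gotc, since the MAC counts its own pause frames as good transmits.
 */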
1393
1394 /************************************************************************
1395  * ixgbe_add_hw_stats
1396  *
1397  *   Add sysctl variables, one per statistic, to the system.
1398  ************************************************************************/
1399 static void
1400 ixgbe_add_hw_stats(struct adapter *adapter)
1401 {
1402         device_t               dev = adapter->dev;
1403         struct tx_ring         *txr = adapter->tx_rings;
1404         struct rx_ring         *rxr = adapter->rx_rings;
1405         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1406         struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1407         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1408         struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1409         struct sysctl_oid      *stat_node, *queue_node;
1410         struct sysctl_oid_list *stat_list, *queue_list;
1411
1412 #define QUEUE_NAME_LEN 32
1413         char                   namebuf[QUEUE_NAME_LEN];
1414
1415         /* Driver Statistics */
1416         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1417             CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1418         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1419             CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1420         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1421             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1422         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1423             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1424
1425         for (int i = 0; i < adapter->num_queues; i++, txr++) {
1426                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1427                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1428                     CTLFLAG_RD, NULL, "Queue Name");
1429                 queue_list = SYSCTL_CHILDREN(queue_node);
1430
1431                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1432                     CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1433                     sizeof(&adapter->queues[i]),
1434                     ixgbe_sysctl_interrupt_rate_handler, "IU",
1435                     "Interrupt Rate");
1436                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1437                     CTLFLAG_RD, &(adapter->queues[i].irqs),
1438                     "irqs on this queue");
1439                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1440                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1441                     ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1442                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1443                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1444                     ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1445                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1446                     CTLFLAG_RD, &txr->tso_tx, "TSO");
1447                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1448                     CTLFLAG_RD, &txr->no_tx_dma_setup,
1449                     "Driver tx dma failure in xmit");
1450                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1451                     CTLFLAG_RD, &txr->no_desc_avail,
1452                     "Queue No Descriptor Available");
1453                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1454                     CTLFLAG_RD, &txr->total_packets,
1455                     "Queue Packets Transmitted");
1456                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1457                     CTLFLAG_RD, &txr->br->br_drops,
1458                     "Packets dropped in buf_ring");
1459         }
1460
1461         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1462                 struct lro_ctrl *lro = &rxr->lro;
1463
1464                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1465                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1466                     CTLFLAG_RD, NULL, "Queue Name");
1467                 queue_list = SYSCTL_CHILDREN(queue_node);
1468
1469                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1470                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1471                     ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1472                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1473                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1474                     ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1475                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1476                     CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1477                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1478                     CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1479                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1480                     CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1481                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1482                     CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1483                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1484                     CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1485                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1486                     CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1487         }
1488
1489         /* MAC stats get their own sub node */
1490
1491         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1492             CTLFLAG_RD, NULL, "MAC Statistics");
1493         stat_list = SYSCTL_CHILDREN(stat_node);
1494
1495         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1496             CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1497         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1498             CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1499         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1500             CTLFLAG_RD, &stats->errbc, "Byte Errors");
1501         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1502             CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1503         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1504             CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1505         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1506             CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1507         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1508             CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1509         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1510             CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1511
1512         /* Flow Control stats */
1513         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1514             CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1515         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1516             CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1517         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1518             CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1519         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1520             CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1521
1522         /* Packet Reception Stats */
1523         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1524             CTLFLAG_RD, &stats->tor, "Total Octets Received");
1525         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1526             CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1527         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1528             CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1529         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1530             CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1531         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1532             CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1533         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1534             CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1535         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1536             CTLFLAG_RD, &stats->prc64, "64 byte frames received");
1537         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1538             CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1539         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1540             CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1541         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1542             CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1543         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1544             CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1545         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1546             CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1547         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1548             CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1549         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1550             CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received");
1551         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1552             CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1553         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1554             CTLFLAG_RD, &stats->rjc, "Received Jabber");
1555         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1556             CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1557         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1558             CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1559         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1560             CTLFLAG_RD, &stats->xec, "Checksum Errors");
1561
1562         /* Packet Transmission Stats */
1563         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1564             CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1565         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1566             CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1567         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1568             CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1569         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1570             CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1571         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1572             CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1573         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1574             CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1575         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1576             CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted");
1577         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1578             CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1579         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1580             CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1581         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1582             CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1583         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1584             CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1585         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1586             CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1587 } /* ixgbe_add_hw_stats */
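/*
 * Usage sketch (assuming the default "ix" device name and unit 0): the
 * statistics registered above appear under dev.ix.0, e.g.
 *
 *   sysctl dev.ix.0.queue0.tx_packets
 *   sysctl dev.ix.0.mac_stats.crc_errs
 *
 * Per-queue counters live under queue0..queueN-1 nodes; MAC counters live
 * under the mac_stats sub-node added above.
 */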
1588
1589 /************************************************************************
1590  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1591  *
1592  *   Retrieves the TDH value from the hardware
1593  ************************************************************************/
1594 static int
1595 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1596 {
1597         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1598         int            error;
1599         unsigned int   val;
1600
1601         if (!txr)
1602                 return (0);
1603
1604         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1605         error = sysctl_handle_int(oidp, &val, 0, req);
1606         if (error || !req->newptr)
1607                 return error;
1608
1609         return (0);
1610 } /* ixgbe_sysctl_tdh_handler */
1611
1612 /************************************************************************
1613  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1614  *
1615  *   Retrieves the TDT value from the hardware
1616  ************************************************************************/
1617 static int
1618 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1619 {
1620         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1621         int            error;
1622         unsigned int   val;
1623
1624         if (!txr)
1625                 return (0);
1626
1627         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1628         error = sysctl_handle_int(oidp, &val, 0, req);
1629         if (error || !req->newptr)
1630                 return error;
1631
1632         return (0);
1633 } /* ixgbe_sysctl_tdt_handler */
1634
1635 /************************************************************************
1636  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1637  *
1638  *   Retrieves the RDH value from the hardware
1639  ************************************************************************/
1640 static int
1641 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1642 {
1643         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1644         int            error;
1645         unsigned int   val;
1646
1647         if (!rxr)
1648                 return (0);
1649
1650         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1651         error = sysctl_handle_int(oidp, &val, 0, req);
1652         if (error || !req->newptr)
1653                 return error;
1654
1655         return (0);
1656 } /* ixgbe_sysctl_rdh_handler */
1657
1658 /************************************************************************
1659  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1660  *
1661  *   Retrieves the RDT value from the hardware
1662  ************************************************************************/
1663 static int
1664 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1665 {
1666         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1667         int            error;
1668         unsigned int   val;
1669
1670         if (!rxr)
1671                 return (0);
1672
1673         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1674         error = sysctl_handle_int(oidp, &val, 0, req);
1675         if (error || !req->newptr)
1676                 return error;
1677
1678         return (0);
1679 } /* ixgbe_sysctl_rdt_handler */
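/*
 * These four handlers are read-only diagnostics.  For example, on a
 * transmit ring (assuming the default "ix" device name):
 *
 *   sysctl dev.ix.0.queue0.txd_head dev.ix.0.queue0.txd_tail
 *
 * With num_tx_desc entries in the ring, roughly
 * (txd_tail - txd_head + num_tx_desc) % num_tx_desc descriptors are queued
 * to the hardware but not yet processed; head == tail means the ring is
 * idle.
 */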
1680
1681 /************************************************************************
1682  * ixgbe_register_vlan
1683  *
1684  *   Run via the vlan config EVENT; it lets us use the
1685  *   HW filter table since we can get the vlan id. This
1686  *   just creates the entry in the soft version of the
1687  *   VFTA; init will repopulate the real table.
1688  ************************************************************************/
1689 static void
1690 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1691 {
1692         struct adapter *adapter = ifp->if_softc;
1693         u16            index, bit;
1694
1695         if (ifp->if_softc != arg)   /* Not our event */
1696                 return;
1697
1698         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1699                 return;
1700
1701         IXGBE_CORE_LOCK(adapter);
1702         index = (vtag >> 5) & 0x7F;
1703         bit = vtag & 0x1F;
1704         adapter->shadow_vfta[index] |= (1 << bit);
1705         ++adapter->num_vlans;
1706         ixgbe_setup_vlan_hw_support(adapter);
1707         IXGBE_CORE_UNLOCK(adapter);
1708 } /* ixgbe_register_vlan */
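/*
 * Example of the shadow VFTA indexing above: for vtag 100,
 * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so bit 4 of
 * shadow_vfta[3] is set.  The 128 32-bit words cover all 4096 VLAN IDs.
 */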
1709
1710 /************************************************************************
1711  * ixgbe_unregister_vlan
1712  *
1713  *   Run via the vlan unconfig EVENT; remove our entry from the soft VFTA.
1714  ************************************************************************/
1715 static void
1716 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1717 {
1718         struct adapter *adapter = ifp->if_softc;
1719         u16            index, bit;
1720
1721         if (ifp->if_softc != arg)
1722                 return;
1723
1724         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1725                 return;
1726
1727         IXGBE_CORE_LOCK(adapter);
1728         index = (vtag >> 5) & 0x7F;
1729         bit = vtag & 0x1F;
1730         adapter->shadow_vfta[index] &= ~(1 << bit);
1731         --adapter->num_vlans;
1732         /* Re-init to load the changes */
1733         ixgbe_setup_vlan_hw_support(adapter);
1734         IXGBE_CORE_UNLOCK(adapter);
1735 } /* ixgbe_unregister_vlan */
1736
1737 /************************************************************************
1738  * ixgbe_setup_vlan_hw_support
1739  ************************************************************************/
1740 static void
1741 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1742 {
1743         struct ifnet    *ifp = adapter->ifp;
1744         struct ixgbe_hw *hw = &adapter->hw;
1745         struct rx_ring  *rxr;
1746         int             i;
1747         u32             ctrl;
1748
1749
1750         /*
1751          * We get here through init_locked, meaning a soft
1752          * reset; that has already cleared the VFTA and other
1753          * state, so if no VLANs have been registered there is
1754          * nothing to do.
1755          */
1756         if (adapter->num_vlans == 0)
1757                 return;
1758
1759         /* Setup the queues for vlans */
1760         for (i = 0; i < adapter->num_queues; i++) {
1761                 rxr = &adapter->rx_rings[i];
1762                 /* On 82599 the VLAN enable is per-queue in RXDCTL */
1763                 if (hw->mac.type != ixgbe_mac_82598EB) {
1764                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1765                         ctrl |= IXGBE_RXDCTL_VME;
1766                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1767                 }
1768                 rxr->vtag_strip = TRUE;
1769         }
1770
1771         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1772                 return;
1773         /*
1774          * A soft reset zeroes out the VFTA, so
1775          * we need to repopulate it now.
1776          */
1777         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1778                 if (adapter->shadow_vfta[i] != 0)
1779                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1780                             adapter->shadow_vfta[i]);
1781
1782         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1783         /* Enable the filter table if VLAN HW filtering is enabled */
1784         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1785                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1786                 ctrl |= IXGBE_VLNCTRL_VFE;
1787         }
1788         if (hw->mac.type == ixgbe_mac_82598EB)
1789                 ctrl |= IXGBE_VLNCTRL_VME;
1790         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1791 } /* ixgbe_setup_vlan_hw_support */
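/*
 * The VFTA path above only matters when VLAN hardware filtering is enabled
 * on the interface, which an administrator would typically request with
 * something like (interface name assumed to be ix0):
 *
 *   ifconfig ix0 vlanhwfilter
 *
 * Without IFCAP_VLAN_HWFILTER set, only the per-queue tag stripping
 * configured above is applied.
 */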
1792
1793 /************************************************************************
1794  * ixgbe_get_slot_info
1795  *
1796  *   Get the width and transaction speed of
1797  *   the slot this adapter is plugged into.
1798  ************************************************************************/
1799 static void
1800 ixgbe_get_slot_info(struct adapter *adapter)
1801 {
1802         device_t              dev = adapter->dev;
1803         struct ixgbe_hw       *hw = &adapter->hw;
1804         u32                   offset;
1805         u16                   link;
1806         int                   bus_info_valid = TRUE;
1807
1808         /* Some devices are behind an internal bridge */
1809         switch (hw->device_id) {
1810         case IXGBE_DEV_ID_82599_SFP_SF_QP:
1811         case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1812                 goto get_parent_info;
1813         default:
1814                 break;
1815         }
1816
1817         ixgbe_get_bus_info(hw);
1818
1819         /*
1820          * Some devices don't use PCI-E; for those there is no point
1821          * in displaying "Unknown" for bus speed and width, so return.
1822          */
1823         switch (hw->mac.type) {
1824         case ixgbe_mac_X550EM_x:
1825         case ixgbe_mac_X550EM_a:
1826                 return;
1827         default:
1828                 goto display;
1829         }
1830
1831 get_parent_info:
1832         /*
1833          * For the Quad port adapter we need to parse back
1834          * up the PCI tree to find the speed of the expansion
1835          * slot into which this adapter is plugged. A bit more work.
1836          */
1837         dev = device_get_parent(device_get_parent(dev));
1838 #ifdef IXGBE_DEBUG
1839         device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1840             pci_get_slot(dev), pci_get_function(dev));
1841 #endif
1842         dev = device_get_parent(device_get_parent(dev));
1843 #ifdef IXGBE_DEBUG
1844         device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1845             pci_get_slot(dev), pci_get_function(dev));
1846 #endif
1847         /* Now get the PCI Express Capabilities offset */
1848         if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1849                 /*
1850                  * Hmm...can't get PCI-Express capabilities.
1851                  * Falling back to default method.
1852                  */
1853                 bus_info_valid = FALSE;
1854                 ixgbe_get_bus_info(hw);
1855                 goto display;
1856         }
1857         /* ...and read the Link Status Register */
1858         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1859         ixgbe_set_pci_config_data_generic(hw, link);
1860
1861 display:
1862         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1863             ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1864              (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1865              (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1866              "Unknown"),
1867             ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1868              (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1869              (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1870              "Unknown"));
1871
1872         if (bus_info_valid) {
1873                 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1874                     ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1875                     (hw->bus.speed == ixgbe_bus_speed_2500))) {
1876                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1877                         device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1878                 }
1879                 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1880                     ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1881                     (hw->bus.speed < ixgbe_bus_speed_8000))) {
1882                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1883                         device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1884                 }
1885         } else
1886                 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are those of the internal switch.\n");
1887
1888         return;
1889 } /* ixgbe_get_slot_info */
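/*
 * One way to cross-check the message printed above is to inspect the PCI
 * Express capabilities from userland with pciconf(8), e.g.
 *
 *   pciconf -lvc
 *
 * and compare the link speed/width it reports for the slot with the
 * driver's output.
 */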
1890
1891 /************************************************************************
1892  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1893  ************************************************************************/
1894 static inline void
1895 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1896 {
1897         struct ixgbe_hw *hw = &adapter->hw;
1898         u64             queue = (u64)1 << vector;
1899         u32             mask;
1900
1901         if (hw->mac.type == ixgbe_mac_82598EB) {
1902                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1903                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1904         } else {
1905                 mask = (queue & 0xFFFFFFFF);
1906                 if (mask)
1907                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1908                 mask = (queue >> 32);
1909                 if (mask)
1910                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1911         }
1912 } /* ixgbe_enable_queue */
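/*
 * Example of the mask split above on a non-82598 MAC: for vector 35,
 * queue = 1 << 35 = 0x0000000800000000, so nothing is written to
 * EIMS_EX(0) and bit 3 (0x00000008) is set in EIMS_EX(1).
 */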
1913
1914 /************************************************************************
1915  * ixgbe_disable_queue
1916  ************************************************************************/
1917 static inline void
1918 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1919 {
1920         struct ixgbe_hw *hw = &adapter->hw;
1921         u64             queue = (u64)1 << vector;
1922         u32             mask;
1923
1924         if (hw->mac.type == ixgbe_mac_82598EB) {
1925                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1926                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1927         } else {
1928                 mask = (queue & 0xFFFFFFFF);
1929                 if (mask)
1930                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1931                 mask = (queue >> 32);
1932                 if (mask)
1933                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1934         }
1935 } /* ixgbe_disable_queue */
1936
1937 /************************************************************************
1938  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1939  ************************************************************************/
1940 void
1941 ixgbe_msix_que(void *arg)
1942 {
1943         struct ix_queue *que = arg;
1944         struct adapter  *adapter = que->adapter;
1945         struct ifnet    *ifp = adapter->ifp;
1946         struct tx_ring  *txr = que->txr;
1947         struct rx_ring  *rxr = que->rxr;
1948         bool            more;
1949         u32             newitr = 0;
1950
1951
1952         /* Protect against spurious interrupts */
1953         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1954                 return;
1955
1956         ixgbe_disable_queue(adapter, que->msix);
1957         ++que->irqs;
1958
1959         more = ixgbe_rxeof(que);
1960
1961         IXGBE_TX_LOCK(txr);
1962         ixgbe_txeof(txr);
1963         if (!ixgbe_ring_empty(ifp, txr->br))
1964                 ixgbe_start_locked(ifp, txr);
1965         IXGBE_TX_UNLOCK(txr);
1966
1967         /* Do AIM now? */
1968
1969         if (adapter->enable_aim == FALSE)
1970                 goto no_calc;
1971         /*
1972          * Do Adaptive Interrupt Moderation:
1973          *  - Write out last calculated setting
1974          *  - Calculate based on average size over
1975          *    the last interval.
1976          */
1977         if (que->eitr_setting)
1978                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1979                     que->eitr_setting);
1980
1981         que->eitr_setting = 0;
1982
1983         /* Idle, do nothing */
1984         if ((txr->bytes == 0) && (rxr->bytes == 0))
1985                 goto no_calc;
1986
1987         if ((txr->bytes) && (txr->packets))
1988                 newitr = txr->bytes/txr->packets;
1989         if ((rxr->bytes) && (rxr->packets))
1990                 newitr = max(newitr, (rxr->bytes / rxr->packets));
1991         newitr += 24; /* account for hardware framing and CRC */
1992
1993         /* set an upper boundary */
1994         newitr = min(newitr, 3000);
1995
1996         /* Be nice to the mid range */
1997         if ((newitr > 300) && (newitr < 1200))
1998                 newitr = (newitr / 3);
1999         else
2000                 newitr = (newitr / 2);
2001
2002         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2003                 newitr |= newitr << 16;
2004         else
2005                 newitr |= IXGBE_EITR_CNT_WDIS;
2006
2007         /* save for next interrupt */
2008         que->eitr_setting = newitr;
2009
2010         /* Reset state */
2011         txr->bytes = 0;
2012         txr->packets = 0;
2013         rxr->bytes = 0;
2014         rxr->packets = 0;
2015
2016 no_calc:
2017         if (more)
2018                 taskqueue_enqueue(que->tq, &que->que_task);
2019         else
2020                 ixgbe_enable_queue(adapter, que->msix);
2021
2022         return;
2023 } /* ixgbe_msix_que */
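/*
 * Worked example of the AIM calculation above: if the last interval
 * averaged 1500-byte frames, newitr = 1500 + 24 = 1524; that is below the
 * 3000 cap and outside the 300-1200 band, so it is halved to 762 before
 * being written to EITR on the next interrupt.  Mid-sized averages
 * (300-1200) are divided by 3 instead.
 */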
2024
2025 /************************************************************************
2026  * ixgbe_media_status - Media Ioctl callback
2027  *
2028  *   Called whenever the user queries the status of
2029  *   the interface using ifconfig.
2030  ************************************************************************/
2031 static void
2032 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2033 {
2034         struct adapter  *adapter = ifp->if_softc;
2035         struct ixgbe_hw *hw = &adapter->hw;
2036         int             layer;
2037
2038         INIT_DEBUGOUT("ixgbe_media_status: begin");
2039         IXGBE_CORE_LOCK(adapter);
2040         ixgbe_update_link_status(adapter);
2041
2042         ifmr->ifm_status = IFM_AVALID;
2043         ifmr->ifm_active = IFM_ETHER;
2044
2045         if (!adapter->link_active) {
2046                 IXGBE_CORE_UNLOCK(adapter);
2047                 return;
2048         }
2049
2050         ifmr->ifm_status |= IFM_ACTIVE;
2051         layer = adapter->phy_layer;
2052
2053         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2054             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2055             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2056             layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2057                 switch (adapter->link_speed) {
2058                 case IXGBE_LINK_SPEED_10GB_FULL:
2059                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2060                         break;
2061                 case IXGBE_LINK_SPEED_1GB_FULL:
2062                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2063                         break;
2064                 case IXGBE_LINK_SPEED_100_FULL:
2065                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2066                         break;
2067                 case IXGBE_LINK_SPEED_10_FULL:
2068                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2069                         break;
2070                 }
2071         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2072             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2073                 switch (adapter->link_speed) {
2074                 case IXGBE_LINK_SPEED_10GB_FULL:
2075                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2076                         break;
2077                 }
2078         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2079                 switch (adapter->link_speed) {
2080                 case IXGBE_LINK_SPEED_10GB_FULL:
2081                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2082                         break;
2083                 case IXGBE_LINK_SPEED_1GB_FULL:
2084                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2085                         break;
2086                 }
2087         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2088                 switch (adapter->link_speed) {
2089                 case IXGBE_LINK_SPEED_10GB_FULL:
2090                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2091                         break;
2092                 case IXGBE_LINK_SPEED_1GB_FULL:
2093                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2094                         break;
2095                 }
2096         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2097             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2098                 switch (adapter->link_speed) {
2099                 case IXGBE_LINK_SPEED_10GB_FULL:
2100                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2101                         break;
2102                 case IXGBE_LINK_SPEED_1GB_FULL:
2103                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2104                         break;
2105                 }
2106         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2107                 switch (adapter->link_speed) {
2108                 case IXGBE_LINK_SPEED_10GB_FULL:
2109                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2110                         break;
2111                 }
2112         /*
2113          * XXX: These need to use the proper media types once
2114          * they're added.
2115          */
2116 #ifndef IFM_ETH_XTYPE
2117         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2118                 switch (adapter->link_speed) {
2119                 case IXGBE_LINK_SPEED_10GB_FULL:
2120                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2121                         break;
2122                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2123                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2124                         break;
2125                 case IXGBE_LINK_SPEED_1GB_FULL:
2126                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2127                         break;
2128                 }
2129         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2130             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2131             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2132                 switch (adapter->link_speed) {
2133                 case IXGBE_LINK_SPEED_10GB_FULL:
2134                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2135                         break;
2136                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2137                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2138                         break;
2139                 case IXGBE_LINK_SPEED_1GB_FULL:
2140                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2141                         break;
2142                 }
2143 #else
2144         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2145                 switch (adapter->link_speed) {
2146                 case IXGBE_LINK_SPEED_10GB_FULL:
2147                         ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2148                         break;
2149                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2150                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2151                         break;
2152                 case IXGBE_LINK_SPEED_1GB_FULL:
2153                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2154                         break;
2155                 }
2156         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2157             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2158             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2159                 switch (adapter->link_speed) {
2160                 case IXGBE_LINK_SPEED_10GB_FULL:
2161                         ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2162                         break;
2163                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2164                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2165                         break;
2166                 case IXGBE_LINK_SPEED_1GB_FULL:
2167                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2168                         break;
2169                 }
2170 #endif
2171
2172         /* If nothing is recognized... */
2173         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2174                 ifmr->ifm_active |= IFM_UNKNOWN;
2175
2176 #if __FreeBSD_version >= 900025
2177         /* Display current flow control setting used on link */
2178         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2179             hw->fc.current_mode == ixgbe_fc_full)
2180                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2181         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2182             hw->fc.current_mode == ixgbe_fc_full)
2183                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2184 #endif
2185
2186         IXGBE_CORE_UNLOCK(adapter);
2187
2188         return;
2189 } /* ixgbe_media_status */
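/*
 * The flags assembled here are what ifconfig(8) reports; for instance, a
 * DA cable at 10G with full flow control would typically show up as
 * something like:
 *
 *   media: Ethernet autoselect (10Gbase-Twinax <full-duplex,rxpause,txpause>)
 *
 * (illustrative output; the exact string depends on the media type and
 * the ifconfig version).
 */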
2190
2191 /************************************************************************
2192  * ixgbe_media_change - Media Ioctl callback
2193  *
2194  *   Called when the user changes speed/duplex using
2195  *   Called when the user changes speed/duplex using the
2196  *   media/mediaopt options of ifconfig.
2197 static int
2198 ixgbe_media_change(struct ifnet *ifp)
2199 {
2200         struct adapter   *adapter = ifp->if_softc;
2201         struct ifmedia   *ifm = &adapter->media;
2202         struct ixgbe_hw  *hw = &adapter->hw;
2203         ixgbe_link_speed speed = 0;
2204
2205         INIT_DEBUGOUT("ixgbe_media_change: begin");
2206
2207         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2208                 return (EINVAL);
2209
2210         if (hw->phy.media_type == ixgbe_media_type_backplane)
2211                 return (ENODEV);
2212
2213         /*
2214          * We don't actually need to check against the supported
2215          * media types of the adapter; ifmedia will take care of
2216          * that for us.
2217          */
2218         switch (IFM_SUBTYPE(ifm->ifm_media)) {
2219                 case IFM_AUTO:
2220                 case IFM_10G_T:
2221                         speed |= IXGBE_LINK_SPEED_100_FULL;
2222                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2223                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2224                         break;
2225                 case IFM_10G_LRM:
2226                 case IFM_10G_LR:
2227 #ifndef IFM_ETH_XTYPE
2228                 case IFM_10G_SR: /* KR, too */
2229                 case IFM_10G_CX4: /* KX4 */
2230 #else
2231                 case IFM_10G_KR:
2232                 case IFM_10G_KX4:
2233 #endif
2234                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2235                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2236                         break;
2237 #ifndef IFM_ETH_XTYPE
2238                 case IFM_1000_CX: /* KX */
2239 #else
2240                 case IFM_1000_KX:
2241 #endif
2242                 case IFM_1000_LX:
2243                 case IFM_1000_SX:
2244                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2245                         break;
2246                 case IFM_1000_T:
2247                         speed |= IXGBE_LINK_SPEED_100_FULL;
2248                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2249                         break;
2250                 case IFM_10G_TWINAX:
2251                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2252                         break;
2253                 case IFM_100_TX:
2254                         speed |= IXGBE_LINK_SPEED_100_FULL;
2255                         break;
2256                 case IFM_10_T:
2257                         speed |= IXGBE_LINK_SPEED_10_FULL;
2258                         break;
2259                 default:
2260                         goto invalid;
2261         }
2262
2263         hw->mac.autotry_restart = TRUE;
2264         hw->mac.ops.setup_link(hw, speed, TRUE);
2265         adapter->advertise =
2266             ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2267             ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2268             ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2269             ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2270
2271         return (0);
2272
2273 invalid:
2274         device_printf(adapter->dev, "Invalid media type!\n");
2275
2276         return (EINVAL);
2277 } /* ixgbe_media_change */
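/*
 * Example of the advertise encoding above: selecting 1000baseT (IFM_1000_T)
 * sets IXGBE_LINK_SPEED_100_FULL | IXGBE_LINK_SPEED_1GB_FULL, so
 * adapter->advertise becomes 2 | 1 = 3; selecting autoselect or 10Gbase-T
 * yields 4 | 2 | 1 = 7 (10G + 1G + 100M).
 */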
2278
2279 /************************************************************************
2280  * ixgbe_set_promisc
2281  ************************************************************************/
2282 static void
2283 ixgbe_set_promisc(struct adapter *adapter)
2284 {
2285         struct ifnet *ifp = adapter->ifp;
2286         int          mcnt = 0;
2287         u32          rctl;
2288
2289         rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2290         rctl &= (~IXGBE_FCTRL_UPE);
2291         if (ifp->if_flags & IFF_ALLMULTI)
2292                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2293         else {
2294                 struct ifmultiaddr *ifma;
2295 #if __FreeBSD_version < 800000
2296                 IF_ADDR_LOCK(ifp);
2297 #else
2298                 if_maddr_rlock(ifp);
2299 #endif
2300                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2301                         if (ifma->ifma_addr->sa_family != AF_LINK)
2302                                 continue;
2303                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2304                                 break;
2305                         mcnt++;
2306                 }
2307 #if __FreeBSD_version < 800000
2308                 IF_ADDR_UNLOCK(ifp);
2309 #else
2310                 if_maddr_runlock(ifp);
2311 #endif
2312         }
2313         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2314                 rctl &= (~IXGBE_FCTRL_MPE);
2315         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2316
2317         if (ifp->if_flags & IFF_PROMISC) {
2318                 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2319                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2320         } else if (ifp->if_flags & IFF_ALLMULTI) {
2321                 rctl |= IXGBE_FCTRL_MPE;
2322                 rctl &= ~IXGBE_FCTRL_UPE;
2323                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2324         }
2325 } /* ixgbe_set_promisc */
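/*
 * This path is driven by the interface flags: "ifconfig ix0 promisc"
 * (interface name assumed) sets IFF_PROMISC and results in both UPE and
 * MPE being set in FCTRL, while IFF_ALLMULTI results in MPE only, with
 * unicast promiscuous mode cleared.
 */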
2326
2327 /************************************************************************
2328  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2329  ************************************************************************/
2330 static void
2331 ixgbe_msix_link(void *arg)
2332 {
2333         struct adapter  *adapter = arg;
2334         struct ixgbe_hw *hw = &adapter->hw;
2335         u32             eicr, eicr_mask;
2336         s32             retval;
2337
2338         ++adapter->link_irq;
2339
2340         /* Pause other interrupts */
2341         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2342
2343         /* First get the cause */
2344         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2345         /* Be sure the queue bits are not cleared */
2346         eicr &= ~IXGBE_EICR_RTX_QUEUE;
2347         /* Clear interrupt with write */
2348         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2349
2350         /* Link status change */
2351         if (eicr & IXGBE_EICR_LSC) {
2352                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2353                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
2354         }
2355
2356         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2357                 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2358                     (eicr & IXGBE_EICR_FLOW_DIR)) {
2359                         /* This is probably overkill :) */
2360                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2361                                 return;
2362                         /* Disable the interrupt */
2363                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2364                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
2365                 }
2366
2367                 if (eicr & IXGBE_EICR_ECC) {
2368                         device_printf(adapter->dev,
2369                             "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
2370                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2371                 }
2372
2373                 /* Check for over temp condition */
2374                 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2375                         switch (adapter->hw.mac.type) {
2376                         case ixgbe_mac_X550EM_a:
2377                                 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2378                                         break;
2379                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2380                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2381                                 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2382                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2383                                 retval = hw->phy.ops.check_overtemp(hw);
2384                                 if (retval != IXGBE_ERR_OVERTEMP)
2385                                         break;
2386                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2387                                 device_printf(adapter->dev, "System shutdown required!\n");
2388                                 break;
2389                         default:
2390                                 if (!(eicr & IXGBE_EICR_TS))
2391                                         break;
2392                                 retval = hw->phy.ops.check_overtemp(hw);
2393                                 if (retval != IXGBE_ERR_OVERTEMP)
2394                                         break;
2395                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2396                                 device_printf(adapter->dev, "System shutdown required!\n");
2397                                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2398                                 break;
2399                         }
2400                 }
2401
2402                 /* Check for VF message */
2403                 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2404                     (eicr & IXGBE_EICR_MAILBOX))
2405                         taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
2406         }
2407
2408         if (ixgbe_is_sfp(hw)) {
2409                 /* Pluggable optics-related interrupt */
2410                 if (hw->mac.type >= ixgbe_mac_X540)
2411                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2412                 else
2413                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2414
2415                 if (eicr & eicr_mask) {
2416                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2417                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2418                 }
2419
2420                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2421                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2422                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
2423                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2424                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2425                 }
2426         }
2427
2428         /* Check for fan failure */
2429         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2430                 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2431                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2432         }
2433
2434         /* External PHY interrupt */
2435         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2436             (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2437                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2438                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
2439         }
2440
2441         /* Re-enable other interrupts */
2442         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
2443 } /* ixgbe_msix_link */
2444
2445 /************************************************************************
2446  * ixgbe_sysctl_interrupt_rate_handler
2447  ************************************************************************/
2448 static int
2449 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2450 {
2451         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2452         int             error;
2453         unsigned int    reg, usec, rate;
2454
2455         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2456         usec = ((reg & 0x0FF8) >> 3);
2457         if (usec > 0)
2458                 rate = 500000 / usec;
2459         else
2460                 rate = 0;
2461         error = sysctl_handle_int(oidp, &rate, 0, req);
2462         if (error || !req->newptr)
2463                 return error;
2464         reg &= ~0xfff; /* default, no limitation */
2465         ixgbe_max_interrupt_rate = 0;
2466         if (rate > 0 && rate < 500000) {
2467                 if (rate < 1000)
2468                         rate = 1000;
2469                 ixgbe_max_interrupt_rate = rate;
2470                 reg |= ((4000000/rate) & 0xff8);
2471         }
2472         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2473
2474         return (0);
2475 } /* ixgbe_sysctl_interrupt_rate_handler */
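/*
 * Worked example of the EITR conversion above (assuming the default "ix"
 * name): writing dev.ix.0.queue0.interrupt_rate=31250 stores
 * (4000000 / 31250) & 0xff8 = 128 in the interval field; reading it back
 * gives usec = 128 >> 3 = 16 and rate = 500000 / 16 = 31250 interrupts
 * per second.
 */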
2476
2477 /************************************************************************
2478  * ixgbe_add_device_sysctls
2479  ************************************************************************/
2480 static void
2481 ixgbe_add_device_sysctls(struct adapter *adapter)
2482 {
2483         device_t               dev = adapter->dev;
2484         struct ixgbe_hw        *hw = &adapter->hw;
2485         struct sysctl_oid_list *child;
2486         struct sysctl_ctx_list *ctx;
2487
2488         ctx = device_get_sysctl_ctx(dev);
2489         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2490
2491         /* Sysctls for all devices */
2492         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
2493             adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
2494
2495         adapter->enable_aim = ixgbe_enable_aim;
2496         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2497             &adapter->enable_aim, 1, "Interrupt Moderation");
2498
2499         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
2500             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2501             IXGBE_SYSCTL_DESC_ADV_SPEED);
2502
2503 #ifdef IXGBE_DEBUG
2504         /* testing sysctls (for all devices) */
2505         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
2506             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2507             "I", "PCI Power State");
2508
2509         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
2510             CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2511             ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2512 #endif
2513         /* for X550 series devices */
2514         if (hw->mac.type >= ixgbe_mac_X550)
2515                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
2516                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2517                     "I", "DMA Coalesce");
2518
2519         /* for WoL-capable devices */
2520         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2521                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
2522                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2523                     ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2524
2525                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
2526                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2527                     "I", "Enable/Disable Wake Up Filters");
2528         }
2529
2530         /* for X552/X557-AT devices */
2531         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2532                 struct sysctl_oid *phy_node;
2533                 struct sysctl_oid_list *phy_list;
2534
2535                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
2536                     CTLFLAG_RD, NULL, "External PHY sysctls");
2537                 phy_list = SYSCTL_CHILDREN(phy_node);
2538
2539                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
2540                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2541                     "I", "Current External PHY Temperature (Celsius)");
2542
2543                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
2544                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
2545                     ixgbe_sysctl_phy_overtemp_occurred, "I",
2546                     "External PHY High Temperature Event Occurred");
2547         }
2548
2549         if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2550                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
2551                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2552                     ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2553         }
2554 } /* ixgbe_add_device_sysctls */
2555
2556 /************************************************************************
2557  * ixgbe_allocate_pci_resources
2558  ************************************************************************/
2559 static int
2560 ixgbe_allocate_pci_resources(struct adapter *adapter)
2561 {
2562         device_t dev = adapter->dev;
2563         int      rid;
2564
2565         rid = PCIR_BAR(0);
2566         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2567             RF_ACTIVE);
2568
2569         if (!(adapter->pci_mem)) {
2570                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2571                 return (ENXIO);
2572         }
2573
2574         /* Save bus_space values for READ/WRITE_REG macros */
2575         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2576         adapter->osdep.mem_bus_space_handle =
2577             rman_get_bushandle(adapter->pci_mem);
2578         /* Set hw values for shared code */
2579         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2580
2581         return (0);
2582 } /* ixgbe_allocate_pci_resources */
2583
2584 /************************************************************************
2585  * ixgbe_detach - Device removal routine
2586  *
2587  *   Called when the driver is being removed.
2588  *   Stops the adapter and deallocates all the resources
2589  *   that were allocated for driver operation.
2590  *
2591  *   return 0 on success, positive on failure
2592  ************************************************************************/
2593 static int
2594 ixgbe_detach(device_t dev)
2595 {
2596         struct adapter  *adapter = device_get_softc(dev);
2597         struct ix_queue *que = adapter->queues;
2598         struct tx_ring  *txr = adapter->tx_rings;
2599         u32             ctrl_ext;
2600
2601         INIT_DEBUGOUT("ixgbe_detach: begin");
2602
2603         /* Make sure VLANS are not using driver */
2604         if (adapter->ifp->if_vlantrunk != NULL) {
2605                 device_printf(dev, "Vlan in use, detach first\n");
2606                 return (EBUSY);
2607         }
2608
2609         if (ixgbe_pci_iov_detach(dev) != 0) {
2610                 device_printf(dev, "SR-IOV in use; detach first.\n");
2611                 return (EBUSY);
2612         }
2613
2614         ether_ifdetach(adapter->ifp);
2615         /* Stop the adapter */
2616         IXGBE_CORE_LOCK(adapter);
2617         ixgbe_setup_low_power_mode(adapter);
2618         IXGBE_CORE_UNLOCK(adapter);
2619
2620         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2621                 if (que->tq) {
2622                         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2623                                 taskqueue_drain(que->tq, &txr->txq_task);
2624                         taskqueue_drain(que->tq, &que->que_task);
2625                         taskqueue_free(que->tq);
2626                 }
2627         }
2628
2629         /* Drain the Link queue */
2630         if (adapter->tq) {
2631                 taskqueue_drain(adapter->tq, &adapter->link_task);
2632                 taskqueue_drain(adapter->tq, &adapter->mod_task);
2633                 taskqueue_drain(adapter->tq, &adapter->msf_task);
2634                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
2635                         taskqueue_drain(adapter->tq, &adapter->mbx_task);
2636                 taskqueue_drain(adapter->tq, &adapter->phy_task);
2637                 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
2638                         taskqueue_drain(adapter->tq, &adapter->fdir_task);
2639                 taskqueue_free(adapter->tq);
2640         }
2641
2642         /* let hardware know driver is unloading */
2643         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2644         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2645         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2646
2647         /* Unregister VLAN events */
2648         if (adapter->vlan_attach != NULL)
2649                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
2650         if (adapter->vlan_detach != NULL)
2651                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2652
2653         callout_drain(&adapter->timer);
2654
2655         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
2656                 netmap_detach(adapter->ifp);
2657
2658         ixgbe_free_pci_resources(adapter);
2659         bus_generic_detach(dev);
2660         if_free(adapter->ifp);
2661
2662         ixgbe_free_transmit_structures(adapter);
2663         ixgbe_free_receive_structures(adapter);
2664         free(adapter->queues, M_DEVBUF);
2665         free(adapter->mta, M_IXGBE);
2666
2667         IXGBE_CORE_LOCK_DESTROY(adapter);
2668
2669         return (0);
2670 } /* ixgbe_detach */
2671
2672 /************************************************************************
2673  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2674  *
2675  *   Prepare the adapter/port for LPLU and/or WoL
2676  ************************************************************************/
2677 static int
2678 ixgbe_setup_low_power_mode(struct adapter *adapter)
2679 {
2680         struct ixgbe_hw *hw = &adapter->hw;
2681         device_t        dev = adapter->dev;
2682         s32             error = 0;
2683
2684         mtx_assert(&adapter->core_mtx, MA_OWNED);
2685
2686         /* Limit power management flow to X550EM baseT */
2687         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2688             hw->phy.ops.enter_lplu) {
2689                 /* Turn off support for APM wakeup. (Using ACPI instead) */
2690                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2691                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2692
2693                 /*
2694                  * Clear Wake Up Status register to prevent any previous wakeup
2695                  * events from waking us up immediately after we suspend.
2696                  */
2697                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2698
2699                 /*
2700                  * Program the Wakeup Filter Control register with user filter
2701                  * settings
2702                  */
2703                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2704
2705                 /* Enable wakeups and power management in Wakeup Control */
2706                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2707                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2708
2709                 /* X550EM baseT adapters need a special LPLU flow */
2710                 hw->phy.reset_disable = true;
2711                 ixgbe_stop(adapter);
2712                 error = hw->phy.ops.enter_lplu(hw);
2713                 if (error)
2714                         device_printf(dev, "Error entering LPLU: %d\n", error);
2715                 hw->phy.reset_disable = false;
2716         } else {
2717                 /* Just stop for other adapters */
2718                 ixgbe_stop(adapter);
2719         }
2720
2721         return error;
2722 } /* ixgbe_setup_low_power_mode */
2723
2724 /************************************************************************
2725  * ixgbe_shutdown - Shutdown entry point
2726  ************************************************************************/
2727 static int
2728 ixgbe_shutdown(device_t dev)
2729 {
2730         struct adapter *adapter = device_get_softc(dev);
2731         int            error = 0;
2732
2733         INIT_DEBUGOUT("ixgbe_shutdown: begin");
2734
2735         IXGBE_CORE_LOCK(adapter);
2736         error = ixgbe_setup_low_power_mode(adapter);
2737         IXGBE_CORE_UNLOCK(adapter);
2738
2739         return (error);
2740 } /* ixgbe_shutdown */
2741
2742 /************************************************************************
2743  * ixgbe_suspend
2744  *
2745  *   From D0 to D3
2746  ************************************************************************/
2747 static int
2748 ixgbe_suspend(device_t dev)
2749 {
2750         struct adapter *adapter = device_get_softc(dev);
2751         int            error = 0;
2752
2753         INIT_DEBUGOUT("ixgbe_suspend: begin");
2754
2755         IXGBE_CORE_LOCK(adapter);
2756
2757         error = ixgbe_setup_low_power_mode(adapter);
2758
2759         IXGBE_CORE_UNLOCK(adapter);
2760
2761         return (error);
2762 } /* ixgbe_suspend */
2763
2764 /************************************************************************
2765  * ixgbe_resume
2766  *
2767  *   From D3 to D0
2768  ************************************************************************/
2769 static int
2770 ixgbe_resume(device_t dev)
2771 {
2772         struct adapter  *adapter = device_get_softc(dev);
2773         struct ifnet    *ifp = adapter->ifp;
2774         struct ixgbe_hw *hw = &adapter->hw;
2775         u32             wus;
2776
2777         INIT_DEBUGOUT("ixgbe_resume: begin");
2778
2779         IXGBE_CORE_LOCK(adapter);
2780
2781         /* Read & clear WUS register */
2782         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2783         if (wus)
2784                 device_printf(dev, "Woken up by (WUS): %#010x\n",
2785                     wus);
2786         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2787         /* And clear WUFC until next low-power transition */
2788         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2789
2790         /*
2791          * Required after D3->D0 transition;
2792          * will re-advertise all previous advertised speeds
2793          */
2794         if (ifp->if_flags & IFF_UP)
2795                 ixgbe_init_locked(adapter);
2796
2797         IXGBE_CORE_UNLOCK(adapter);
2798
2799         return (0);
2800 } /* ixgbe_resume */
2801
2802 /************************************************************************
2803  * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2804  *
2805  *   Takes the ifnet's if_capenable flags (e.g. set by the user using
2806  *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
2807  *   field what mbuf offload flags the driver will understand.
2808  ************************************************************************/
2809 static void
2810 ixgbe_set_if_hwassist(struct adapter *adapter)
2811 {
2812         struct ifnet *ifp = adapter->ifp;
2813
2814         ifp->if_hwassist = 0;
2815 #if __FreeBSD_version >= 1000000
2816         if (ifp->if_capenable & IFCAP_TSO4)
2817                 ifp->if_hwassist |= CSUM_IP_TSO;
2818         if (ifp->if_capenable & IFCAP_TSO6)
2819                 ifp->if_hwassist |= CSUM_IP6_TSO;
2820         if (ifp->if_capenable & IFCAP_TXCSUM) {
2821                 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2822                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2823                         ifp->if_hwassist |= CSUM_IP_SCTP;
2824         }
2825         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2826                 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2827                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2828                         ifp->if_hwassist |= CSUM_IP6_SCTP;
2829         }
2830 #else
2831         if (ifp->if_capenable & IFCAP_TSO)
2832                 ifp->if_hwassist |= CSUM_TSO;
2833         if (ifp->if_capenable & IFCAP_TXCSUM) {
2834                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2835                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2836                         ifp->if_hwassist |= CSUM_SCTP;
2837         }
2838 #endif
2839 } /* ixgbe_set_if_hwassist */
2840
2841 /************************************************************************
2842  * ixgbe_init_locked - Init entry point
2843  *
2844  *   Used in two ways: it is the init entry point registered in the
2845  *   network interface structure for the stack, and it is also used
2846  *   by the driver as a hw/sw initialization routine to bring the
2847  *   adapter to a consistent state.
2848  *
2849  *   Returns nothing; on failure the adapter is simply stopped.
2850  ************************************************************************/
2851 void
2852 ixgbe_init_locked(struct adapter *adapter)
2853 {
2854         struct ifnet    *ifp = adapter->ifp;
2855         device_t        dev = adapter->dev;
2856         struct ixgbe_hw *hw = &adapter->hw;
2857         struct tx_ring  *txr;
2858         struct rx_ring  *rxr;
2859         u32             txdctl, mhadd;
2860         u32             rxdctl, rxctrl;
2861         u32             ctrl_ext;
2862         int             err = 0;
2863
2864         mtx_assert(&adapter->core_mtx, MA_OWNED);
2865         INIT_DEBUGOUT("ixgbe_init_locked: begin");
2866
2867         hw->adapter_stopped = FALSE;
2868         ixgbe_stop_adapter(hw);
2869         callout_stop(&adapter->timer);
2870
2871         /* Queue indices may change with IOV mode */
2872         ixgbe_align_all_queue_indices(adapter);
2873
2874         /* reprogram the RAR[0] in case user changed it. */
2875         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2876
2877         /* Get the latest mac address, User can use a LAA */
2878         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2879         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2880         hw->addr_ctrl.rar_used_count = 1;
2881
2882         /* Set hardware offload abilities from ifnet flags */
2883         ixgbe_set_if_hwassist(adapter);
2884
2885         /* Prepare transmit descriptors and buffers */
2886         if (ixgbe_setup_transmit_structures(adapter)) {
2887                 device_printf(dev, "Could not setup transmit structures\n");
2888                 ixgbe_stop(adapter);
2889                 return;
2890         }
2891
2892         ixgbe_init_hw(hw);
2893         ixgbe_initialize_iov(adapter);
2894         ixgbe_initialize_transmit_units(adapter);
2895
2896         /* Setup Multicast table */
2897         ixgbe_set_multi(adapter);
2898
2899         /* Determine the correct mbuf pool, based on frame size */
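        /*
         * MCLBYTES is the standard 2 KB mbuf cluster; MJUMPAGESIZE is a
         * page-sized jumbo cluster (typically 4 KB), used once a frame no
         * longer fits in a standard cluster.
         */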
2900         if (adapter->max_frame_size <= MCLBYTES)
2901                 adapter->rx_mbuf_sz = MCLBYTES;
2902         else
2903                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2904
2905         /* Prepare receive descriptors and buffers */
2906         if (ixgbe_setup_receive_structures(adapter)) {
2907                 device_printf(dev, "Could not setup receive structures\n");
2908                 ixgbe_stop(adapter);
2909                 return;
2910         }
2911
2912         /* Configure RX settings */
2913         ixgbe_initialize_receive_units(adapter);
2914
2915         /* Enable SDP & MSI-X interrupts based on adapter */
2916         ixgbe_config_gpie(adapter);
2917
2918         /* Set MTU size */
2919         if (ifp->if_mtu > ETHERMTU) {
2920                 /* aka IXGBE_MAXFRS on 82599 and newer */
2921                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2922                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2923                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2924                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2925         }
2926
2927         /* Now enable all the queues */
2928         for (int i = 0; i < adapter->num_queues; i++) {
2929                 txr = &adapter->tx_rings[i];
2930                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2931                 txdctl |= IXGBE_TXDCTL_ENABLE;
2932                 /* Set WTHRESH to 8, burst writeback */
2933                 txdctl |= (8 << 16);
2934                 /*
2935                  * When the internal queue falls below PTHRESH (32),
2936                  * start prefetching as long as there are at least
2937                  * HTHRESH (1) buffers ready. The values are taken
2938                  * from the Intel linux driver 3.8.21.
2939                  * Prefetching enables tx line rate even with 1 queue.
2940                  */
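                /*
                 * TXDCTL field positions: PTHRESH starts at bit 0, HTHRESH
                 * at bit 8 and WTHRESH at bit 16, which is what the shifts
                 * above and below encode.
                 */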
2941                 txdctl |= (32 << 0) | (1 << 8);
2942                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2943         }
2944
2945         for (int i = 0, j = 0; i < adapter->num_queues; i++) {
2946                 rxr = &adapter->rx_rings[i];
2947                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2948                 if (hw->mac.type == ixgbe_mac_82598EB) {
2949                         /*
2950                          * PTHRESH = 32 (0x20), HTHRESH = 4, WTHRESH = 8,
2951                          * as encoded in the 0x080420 value below
2952                          * (fields at bits 6:0, 14:8 and 22:16).
2953                          */
2954                         rxdctl &= ~0x3FFFFF;
2955                         rxdctl |= 0x080420;
2956                 }
2957                 rxdctl |= IXGBE_RXDCTL_ENABLE;
2958                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2959                 for (; j < 10; j++) {
2960                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2961                             IXGBE_RXDCTL_ENABLE)
2962                                 break;
2963                         else
2964                                 msec_delay(1);
2965                 }
2966                 wmb();
2967
2968                 /*
2969                  * In netmap mode, we must preserve the buffers made
2970                  * available to userspace before the if_init()
2971                  * (this is true by default on the TX side, because
2972                  * init makes all buffers available to userspace).
2973                  *
2974                  * netmap_reset() and the device specific routines
2975                  * (e.g. ixgbe_setup_receive_rings()) map these
2976                  * buffers at the end of the NIC ring, so here we
2977                  * must set the RDT (tail) register to make sure
2978                  * they are not overwritten.
2979                  *
2980                  * In this driver the NIC ring starts at RDH = 0,
2981                  * RDT points to the last slot available for reception (?),
2982                  * so RDT = num_rx_desc - 1 means the whole ring is available.
2983                  */
2984 #ifdef DEV_NETMAP
2985                 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
2986                     (ifp->if_capenable & IFCAP_NETMAP)) {
2987                         struct netmap_adapter *na = NA(adapter->ifp);
2988                         struct netmap_kring *kring = &na->rx_rings[i];
2989                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
2990
2991                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
2992                 } else
2993 #endif /* DEV_NETMAP */
2994                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
2995                             adapter->num_rx_desc - 1);
2996         }
2997
2998         /* Enable Receive engine */
2999         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3000         if (hw->mac.type == ixgbe_mac_82598EB)
3001                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3002         rxctrl |= IXGBE_RXCTRL_RXEN;
3003         ixgbe_enable_rx_dma(hw, rxctrl);
3004
3005         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3006
3007         /* Set up MSI-X routing */
3008         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3009                 ixgbe_configure_ivars(adapter);
3010                 /* Set up auto-mask */
3011                 if (hw->mac.type == ixgbe_mac_82598EB)
3012                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3013                 else {
3014                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3015                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3016                 }
3017         } else {  /* Simple settings for Legacy/MSI */
3018                 ixgbe_set_ivar(adapter, 0, 0, 0);
3019                 ixgbe_set_ivar(adapter, 0, 0, 1);
3020                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3021         }
3022
3023         ixgbe_init_fdir(adapter);
3024
3025         /*
3026          * Check on any SFP devices that
3027          * need to be kick-started
3028          */
3029         if (hw->phy.type == ixgbe_phy_none) {
3030                 err = hw->phy.ops.identify(hw);
3031                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3032                         device_printf(dev,
3033                             "Unsupported SFP+ module type was detected.\n");
3034                         return;
3035                 }
3036         }
3037
3038         /* Set moderation on the Link interrupt */
3039         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3040
3041         /* Config/Enable Link */
3042         ixgbe_config_link(adapter);
3043
3044         /* Hardware Packet Buffer & Flow Control setup */
3045         ixgbe_config_delay_values(adapter);
3046
3047         /* Initialize the FC settings */
3048         ixgbe_start_hw(hw);
3049
3050         /* Set up VLAN support and filter */
3051         ixgbe_setup_vlan_hw_support(adapter);
3052
3053         /* Setup DMA Coalescing */
3054         ixgbe_config_dmac(adapter);
3055
3056         /* And now turn on interrupts */
3057         ixgbe_enable_intr(adapter);
3058
3059         /* Enable use of the mailbox (MBX) by the VFs */
3060         if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3061                 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3062                 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3063                 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3064         }
3065
3066         /* Now inform the stack we're ready */
3067         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3068
3069         return;
3070 } /* ixgbe_init_locked */
3071
3072 /************************************************************************
3073  * ixgbe_init
3074  ************************************************************************/
3075 static void
3076 ixgbe_init(void *arg)
3077 {
3078         struct adapter *adapter = arg;
3079
3080         IXGBE_CORE_LOCK(adapter);
3081         ixgbe_init_locked(adapter);
3082         IXGBE_CORE_UNLOCK(adapter);
3083
3084         return;
3085 } /* ixgbe_init */
3086
3087 /************************************************************************
3088  * ixgbe_set_ivar
3089  *
3090  *   Setup the correct IVAR register for a particular MSI-X interrupt
3091  *     (yes this is all very magic and confusing :)
3092  *    - entry is the register array entry
3093  *    - vector is the MSI-X vector for this queue
3094  *    - type is RX/TX/MISC
3095  ************************************************************************/
3096 static void
3097 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3098 {
3099         struct ixgbe_hw *hw = &adapter->hw;
3100         u32 ivar, index;
3101
3102         vector |= IXGBE_IVAR_ALLOC_VAL;
3103
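        /*
         * Each IVAR register packs several one-byte cause entries; an entry
         * holds the MSI-X vector number with IXGBE_IVAR_ALLOC_VAL (the high
         * bit) set to mark it valid.  The MAC families below differ only in
         * how an entry is located within the register array.
         */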
3104         switch (hw->mac.type) {
3105
3106         case ixgbe_mac_82598EB:
3107                 if (type == -1)
3108                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3109                 else
3110                         entry += (type * 64);
3111                 index = (entry >> 2) & 0x1F;
3112                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3113                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3114                 ivar |= (vector << (8 * (entry & 0x3)));
3115                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3116                 break;
3117
3118         case ixgbe_mac_82599EB:
3119         case ixgbe_mac_X540:
3120         case ixgbe_mac_X550:
3121         case ixgbe_mac_X550EM_x:
3122         case ixgbe_mac_X550EM_a:
3123                 if (type == -1) { /* MISC IVAR */
3124                         index = (entry & 1) * 8;
3125                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3126                         ivar &= ~(0xFF << index);
3127                         ivar |= (vector << index);
3128                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3129                 } else {          /* RX/TX IVARS */
3130                         index = (16 * (entry & 1)) + (8 * type);
3131                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3132                         ivar &= ~(0xFF << index);
3133                         ivar |= (vector << index);
3134                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3135                 }
3136                 break;
3137         default:
3138                 break;
3139         }
3140 } /* ixgbe_set_ivar */
3141
3142 /************************************************************************
3143  * ixgbe_configure_ivars
3144  ************************************************************************/
3145 static void
3146 ixgbe_configure_ivars(struct adapter *adapter)
3147 {
3148         struct ix_queue *que = adapter->queues;
3149         u32             newitr;
3150
3151         if (ixgbe_max_interrupt_rate > 0)
3152                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3153         else {
3154                 /*
3155                  * Disable DMA coalescing if interrupt moderation is
3156                  * disabled.
3157                  */
3158                 adapter->dmac = 0;
3159                 newitr = 0;
3160         }
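        /* newitr is already aligned to the EITR interval field (bits 3..11). */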
3161
3162         for (int i = 0; i < adapter->num_queues; i++, que++) {
3163                 struct rx_ring *rxr = &adapter->rx_rings[i];
3164                 struct tx_ring *txr = &adapter->tx_rings[i];
3165                 /* First the RX queue entry */
3166                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3167                 /* ... and the TX */
3168                 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3169                 /* Set an Initial EITR value */
3170                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3171         }
3172
3173         /* For the Link interrupt */
3174         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3175 } /* ixgbe_configure_ivars */
3176
3177 /************************************************************************
3178  * ixgbe_config_gpie
3179  ************************************************************************/
3180 static void
3181 ixgbe_config_gpie(struct adapter *adapter)
3182 {
3183         struct ixgbe_hw *hw = &adapter->hw;
3184         u32             gpie;
3185
3186         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3187
3188         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3189                 /* Enable Enhanced MSI-X mode */
3190                 gpie |= IXGBE_GPIE_MSIX_MODE
3191                      |  IXGBE_GPIE_EIAME
3192                      |  IXGBE_GPIE_PBA_SUPPORT
3193                      |  IXGBE_GPIE_OCD;
3194         }
3195
3196         /* Fan Failure Interrupt */
3197         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3198                 gpie |= IXGBE_SDP1_GPIEN;
3199
3200         /* Thermal Sensor Interrupt */
3201         if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3202                 gpie |= IXGBE_SDP0_GPIEN_X540;
3203
3204         /* Link detection */
3205         switch (hw->mac.type) {
3206         case ixgbe_mac_82599EB:
3207                 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3208                 break;
3209         case ixgbe_mac_X550EM_x:
3210         case ixgbe_mac_X550EM_a:
3211                 gpie |= IXGBE_SDP0_GPIEN_X540;
3212                 break;
3213         default:
3214                 break;
3215         }
3216
3217         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3218
3219         return;
3220 } /* ixgbe_config_gpie */
3221
3222 /************************************************************************
3223  * ixgbe_config_delay_values
3224  *
3225  *   Requires adapter->max_frame_size to be set.
3226  ************************************************************************/
3227 static void
3228 ixgbe_config_delay_values(struct adapter *adapter)
3229 {
3230         struct ixgbe_hw *hw = &adapter->hw;
3231         u32             rxpb, frame, size, tmp;
3232
3233         frame = adapter->max_frame_size;
3234
3235         /* Calculate High Water */
3236         switch (hw->mac.type) {
3237         case ixgbe_mac_X540:
3238         case ixgbe_mac_X550:
3239         case ixgbe_mac_X550EM_x:
3240         case ixgbe_mac_X550EM_a:
3241                 tmp = IXGBE_DV_X540(frame, frame);
3242                 break;
3243         default:
3244                 tmp = IXGBE_DV(frame, frame);
3245                 break;
3246         }
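        /*
         * High water mark: the RX packet buffer size (in KB) minus the
         * worst-case delay value computed above, converted to KB.
         */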
3247         size = IXGBE_BT2KB(tmp);
3248         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3249         hw->fc.high_water[0] = rxpb - size;
3250
3251         /* Now calculate Low Water */
3252         switch (hw->mac.type) {
3253         case ixgbe_mac_X540:
3254         case ixgbe_mac_X550:
3255         case ixgbe_mac_X550EM_x:
3256         case ixgbe_mac_X550EM_a:
3257                 tmp = IXGBE_LOW_DV_X540(frame);
3258                 break;
3259         default:
3260                 tmp = IXGBE_LOW_DV(frame);
3261                 break;
3262         }
3263         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3264
3265         hw->fc.pause_time = IXGBE_FC_PAUSE;
3266         hw->fc.send_xon = TRUE;
3267 } /* ixgbe_config_delay_values */
3268
3269 /************************************************************************
3270  * ixgbe_set_multi - Multicast Update
3271  *
3272  *   Called whenever multicast address list is updated.
3273  ************************************************************************/
3274 static void
3275 ixgbe_set_multi(struct adapter *adapter)
3276 {
3277         struct ifmultiaddr   *ifma;
3278         struct ixgbe_mc_addr *mta;
3279         struct ifnet         *ifp = adapter->ifp;
3280         u8                   *update_ptr;
3281         int                  mcnt = 0;
3282         u32                  fctrl;
3283
3284         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3285
3286         mta = adapter->mta;
3287         bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3288
3289 #if __FreeBSD_version < 800000
3290         IF_ADDR_LOCK(ifp);
3291 #else
3292         if_maddr_rlock(ifp);
3293 #endif
3294         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3295                 if (ifma->ifma_addr->sa_family != AF_LINK)
3296                         continue;
3297                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3298                         break;
3299                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3300                     mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3301                 mta[mcnt].vmdq = adapter->pool;
3302                 mcnt++;
3303         }
3304 #if __FreeBSD_version < 800000
3305         IF_ADDR_UNLOCK(ifp);
3306 #else
3307         if_maddr_runlock(ifp);
3308 #endif
3309
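        /*
         * FCTRL_UPE / FCTRL_MPE are the unicast / multicast promiscuous
         * enable bits; multicast-promiscuous mode is used as a fallback
         * when the address list overflows the hardware table.
         */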
3310         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3311         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3312         if (ifp->if_flags & IFF_PROMISC)
3313                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3314         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3315             ifp->if_flags & IFF_ALLMULTI) {
3316                 fctrl |= IXGBE_FCTRL_MPE;
3317                 fctrl &= ~IXGBE_FCTRL_UPE;
3318         } else
3319                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3320
3321         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3322
3323         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3324                 update_ptr = (u8 *)mta;
3325                 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3326                     ixgbe_mc_array_itr, TRUE);
3327         }
3328
3329         return;
3330 } /* ixgbe_set_multi */
3331
3332 /************************************************************************
3333  * ixgbe_mc_array_itr
3334  *
3335  *   An iterator function needed by the multicast shared code.
3336  *   It feeds the shared code routine the addresses in the
3337  *   array built by ixgbe_set_multi(), one at a time.
3338  ************************************************************************/
3339 static u8 *
3340 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3341 {
3342         struct ixgbe_mc_addr *mta;
3343
3344         mta = (struct ixgbe_mc_addr *)*update_ptr;
3345         *vmdq = mta->vmdq;
3346
3347         *update_ptr = (u8*)(mta + 1);
3348
3349         return (mta->addr);
3350 } /* ixgbe_mc_array_itr */
3351
3352 /************************************************************************
3353  * ixgbe_local_timer - Timer routine
3354  *
3355  *   Checks for link status, updates statistics,
3356  *   and runs the watchdog check.
3357  ************************************************************************/
3358 static void
3359 ixgbe_local_timer(void *arg)
3360 {
3361         struct adapter  *adapter = arg;
3362         device_t        dev = adapter->dev;
3363         struct ix_queue *que = adapter->queues;
3364         u64             queues = 0;
3365         int             hung = 0;
3366
3367         mtx_assert(&adapter->core_mtx, MA_OWNED);
3368
3369         /* Check for pluggable optics */
3370         if (adapter->sfp_probe)
3371                 if (!ixgbe_sfp_probe(adapter))
3372                         goto out; /* Nothing to do */
3373
3374         ixgbe_update_link_status(adapter);
3375         ixgbe_update_stats_counters(adapter);
3376
3377         /*
3378          * Check the TX queues status
3379          *      - mark hung queues so we don't schedule on them
3380          *      - watchdog only if all queues show hung
3381          */
3382         for (int i = 0; i < adapter->num_queues; i++, que++) {
3383                 /* Keep track of queues with work for soft irq */
3384                 if (que->txr->busy)
3385                         queues |= ((u64)1 << que->me);
3386                 /*
3387                  * Each time txeof runs without cleaning anything while
3388                  * uncleaned descriptors remain, it increments 'busy'.  If
3389                  * that count reaches the MAX, we declare the queue hung.
3390                  */
3391                 if (que->busy == IXGBE_QUEUE_HUNG) {
3392                         ++hung;
3393                         /* Mark the queue as inactive */
3394                         adapter->active_queues &= ~((u64)1 << que->me);
3395                         continue;
3396                 } else {
3397                         /* Check if we've come back from hung */
3398                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
3399                                 adapter->active_queues |= ((u64)1 << que->me);
3400                 }
3401                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
3402                         device_printf(dev,
3403                             "Warning queue %d appears to be hung!\n", i);
3404                         que->txr->busy = IXGBE_QUEUE_HUNG;
3405                         ++hung;
3406                 }
3407         }
3408
3409         /* Only truly watchdog if all queues show hung */
3410         if (hung == adapter->num_queues)
3411                 goto watchdog;
3412         else if (queues != 0) { /* Force an IRQ on queues with work */
3413                 ixgbe_rearm_queues(adapter, queues);
3414         }
3415
3416 out:
3417         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3418         return;
3419
3420 watchdog:
3421         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
3422         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3423         adapter->watchdog_events++;
3424         ixgbe_init_locked(adapter);
3425 } /* ixgbe_local_timer */
3426
3427 /************************************************************************
3428  * ixgbe_sfp_probe
3429  *
3430  *   Determine if a port had optics inserted.
3431  ************************************************************************/
3432 static bool
3433 ixgbe_sfp_probe(struct adapter *adapter)
3434 {
3435         struct ixgbe_hw *hw = &adapter->hw;
3436         device_t        dev = adapter->dev;
3437         bool            result = FALSE;
3438
3439         if ((hw->phy.type == ixgbe_phy_nl) &&
3440             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3441                 s32 ret = hw->phy.ops.identify_sfp(hw);
3442                 if (ret)
3443                         goto out;
3444                 ret = hw->phy.ops.reset(hw);
3445                 adapter->sfp_probe = FALSE;
3446                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3447                         device_printf(dev, "Unsupported SFP+ module detected!\n");
3448                         device_printf(dev,
3449                             "Reload driver with supported module.\n");
3450                         goto out;
3451                 } else
3452                         device_printf(dev, "SFP+ module detected!\n");
3453                 /* We now have supported optics */
3454                 result = TRUE;
3455         }
3456 out:
3457
3458         return (result);
3459 } /* ixgbe_sfp_probe */
3460
3461 /************************************************************************
3462  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3463  ************************************************************************/
3464 static void
3465 ixgbe_handle_mod(void *context, int pending)
3466 {
3467         struct adapter  *adapter = context;
3468         struct ixgbe_hw *hw = &adapter->hw;
3469         device_t        dev = adapter->dev;
3470         u32             err, cage_full = 0;
3471
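        /*
         * With the crosstalk workaround enabled, first verify via the ESDP
         * register that the SFP+ cage is actually occupied; bail out if it
         * is empty rather than acting on a possibly spurious interrupt.
         */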
3472         if (adapter->hw.need_crosstalk_fix) {
3473                 switch (hw->mac.type) {
3474                 case ixgbe_mac_82599EB:
3475                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3476                             IXGBE_ESDP_SDP2;
3477                         break;
3478                 case ixgbe_mac_X550EM_x:
3479                 case ixgbe_mac_X550EM_a:
3480                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3481                             IXGBE_ESDP_SDP0;
3482                         break;
3483                 default:
3484                         break;
3485                 }
3486
3487                 if (!cage_full)
3488                         return;
3489         }
3490
3491         err = hw->phy.ops.identify_sfp(hw);
3492         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3493                 device_printf(dev,
3494                     "Unsupported SFP+ module type was detected.\n");
3495                 return;
3496         }
3497
3498         err = hw->mac.ops.setup_sfp(hw);
3499         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3500                 device_printf(dev,
3501                     "Setup failure - unsupported SFP+ module type.\n");
3502                 return;
3503         }
3504         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3505 } /* ixgbe_handle_mod */
3506
3507
3508 /************************************************************************
3509  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3510  ************************************************************************/
3511 static void
3512 ixgbe_handle_msf(void *context, int pending)
3513 {
3514         struct adapter  *adapter = context;
3515         struct ixgbe_hw *hw = &adapter->hw;
3516         u32             autoneg;
3517         bool            negotiate;
3518
3519         /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
3520         adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3521
3522         autoneg = hw->phy.autoneg_advertised;
3523         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3524                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3525         if (hw->mac.ops.setup_link)
3526                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3527
3528         /* Adjust media types shown in ifconfig */
3529         ifmedia_removeall(&adapter->media);
3530         ixgbe_add_media_types(adapter);
3531         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3532 } /* ixgbe_handle_msf */
3533
3534 /************************************************************************
3535  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3536  ************************************************************************/
3537 static void
3538 ixgbe_handle_phy(void *context, int pending)
3539 {
3540         struct adapter  *adapter = context;
3541         struct ixgbe_hw *hw = &adapter->hw;
3542         int             error;
3543
3544         error = hw->phy.ops.handle_lasi(hw);
3545         if (error == IXGBE_ERR_OVERTEMP)
3546                 device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!!  PHY will downshift to lower power state!\n");
3547         else if (error)
3548                 device_printf(adapter->dev,
3549                     "Error handling LASI interrupt: %d\n", error);
3550 } /* ixgbe_handle_phy */
3551
3552 /************************************************************************
3553  * ixgbe_stop - Stop the hardware
3554  *
3555  *   Disables all traffic on the adapter by issuing a global
3556  *   reset on the MAC; TX/RX buffers remain allocated until detach.
3557  ************************************************************************/
3558 static void
3559 ixgbe_stop(void *arg)
3560 {
3561         struct ifnet    *ifp;
3562         struct adapter  *adapter = arg;
3563         struct ixgbe_hw *hw = &adapter->hw;
3564
3565         ifp = adapter->ifp;
3566
3567         mtx_assert(&adapter->core_mtx, MA_OWNED);
3568
3569         INIT_DEBUGOUT("ixgbe_stop: begin\n");
3570         ixgbe_disable_intr(adapter);
3571         callout_stop(&adapter->timer);
3572
3573         /* Let the stack know...*/
3574         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3575
3576         ixgbe_reset_hw(hw);
3577         hw->adapter_stopped = FALSE;
3578         ixgbe_stop_adapter(hw);
3579         if (hw->mac.type == ixgbe_mac_82599EB)
3580                 ixgbe_stop_mac_link_on_d3_82599(hw);
3581         /* Turn off the laser - noop with no optics */
3582         ixgbe_disable_tx_laser(hw);
3583
3584         /* Update the stack */
3585         adapter->link_up = FALSE;
3586         ixgbe_update_link_status(adapter);
3587
3588         /* reprogram the RAR[0] in case user changed it. */
3589         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3590
3591         return;
3592 } /* ixgbe_stop */
3593
3594 /************************************************************************
3595  * ixgbe_update_link_status - Update OS on link state
3596  *
3597  * Note: Only updates the OS on the cached link state.
3598  *       The real check of the hardware only happens with
3599  *       a link interrupt.
3600  ************************************************************************/
3601 static void
3602 ixgbe_update_link_status(struct adapter *adapter)
3603 {
3604         struct ifnet *ifp = adapter->ifp;
3605         device_t     dev = adapter->dev;
3606
3607         if (adapter->link_up) {
3608                 if (adapter->link_active == FALSE) {
3609                         if (bootverbose)
3610                                 device_printf(dev, "Link is up %d Gbps %s \n",
3611                                     ((adapter->link_speed == 128) ? 10 : 1),
3612                                     "Full Duplex");
3613                         adapter->link_active = TRUE;
3614                         /* Update any Flow Control changes */
3615                         ixgbe_fc_enable(&adapter->hw);
3616                         /* Update DMA coalescing config */
3617                         ixgbe_config_dmac(adapter);
3618                         if_link_state_change(ifp, LINK_STATE_UP);
3619                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3620                                 ixgbe_ping_all_vfs(adapter);
3621                 }
3622         } else { /* Link down */
3623                 if (adapter->link_active == TRUE) {
3624                         if (bootverbose)
3625                                 device_printf(dev, "Link is Down\n");
3626                         if_link_state_change(ifp, LINK_STATE_DOWN);
3627                         adapter->link_active = FALSE;
3628                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3629                                 ixgbe_ping_all_vfs(adapter);
3630                 }
3631         }
3632
3633         return;
3634 } /* ixgbe_update_link_status */
3635
3636 /************************************************************************
3637  * ixgbe_config_dmac - Configure DMA Coalescing
3638  ************************************************************************/
3639 static void
3640 ixgbe_config_dmac(struct adapter *adapter)
3641 {
3642         struct ixgbe_hw          *hw = &adapter->hw;
3643         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3644
3645         if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3646                 return;
3647
3648         if (dcfg->watchdog_timer ^ adapter->dmac ||
3649             dcfg->link_speed ^ adapter->link_speed) {
3650                 dcfg->watchdog_timer = adapter->dmac;
3651                 dcfg->fcoe_en = false;
3652                 dcfg->link_speed = adapter->link_speed;
3653                 dcfg->num_tcs = 1;
3654
3655                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3656                     dcfg->watchdog_timer, dcfg->link_speed);
3657
3658                 hw->mac.ops.dmac_config(hw);
3659         }
3660 } /* ixgbe_config_dmac */
3661
3662 /************************************************************************
3663  * ixgbe_enable_intr
3664  ************************************************************************/
3665 static void
3666 ixgbe_enable_intr(struct adapter *adapter)
3667 {
3668         struct ixgbe_hw *hw = &adapter->hw;
3669         struct ix_queue *que = adapter->queues;
3670         u32             mask, fwsm;
3671
3672         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
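        /*
         * Start from the full enable mask but leave out the per-queue RTX
         * bits; the queues are enabled individually by ixgbe_enable_queue()
         * at the end of this function.
         */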
3673
3674         switch (adapter->hw.mac.type) {
3675         case ixgbe_mac_82599EB:
3676                 mask |= IXGBE_EIMS_ECC;
3677                 /* Temperature sensor on some adapters */
3678                 mask |= IXGBE_EIMS_GPI_SDP0;
3679                 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3680                 mask |= IXGBE_EIMS_GPI_SDP1;
3681                 mask |= IXGBE_EIMS_GPI_SDP2;
3682                 break;
3683         case ixgbe_mac_X540:
3684                 /* Detect if Thermal Sensor is enabled */
3685                 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3686                 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3687                         mask |= IXGBE_EIMS_TS;
3688                 mask |= IXGBE_EIMS_ECC;
3689                 break;
3690         case ixgbe_mac_X550:
3691                 /* MAC thermal sensor is automatically enabled */
3692                 mask |= IXGBE_EIMS_TS;
3693                 mask |= IXGBE_EIMS_ECC;
3694                 break;
3695         case ixgbe_mac_X550EM_x:
3696         case ixgbe_mac_X550EM_a:
3697                 /* Some devices use SDP0 for important information */
3698                 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3699                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3700                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3701                     hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3702                         mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3703                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3704                         mask |= IXGBE_EICR_GPI_SDP0_X540;
3705                 mask |= IXGBE_EIMS_ECC;
3706                 break;
3707         default:
3708                 break;
3709         }
3710
3711         /* Enable Fan Failure detection */
3712         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3713                 mask |= IXGBE_EIMS_GPI_SDP1;
3714         /* Enable SR-IOV */
3715         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3716                 mask |= IXGBE_EIMS_MAILBOX;
3717         /* Enable Flow Director */
3718         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3719                 mask |= IXGBE_EIMS_FLOW_DIR;
3720
3721         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3722
3723         /* With MSI-X we use auto clear */
3724         if (adapter->msix_mem) {
3725                 mask = IXGBE_EIMS_ENABLE_MASK;
3726                 /* Don't autoclear Link */
3727                 mask &= ~IXGBE_EIMS_OTHER;
3728                 mask &= ~IXGBE_EIMS_LSC;
3729                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3730                         mask &= ~IXGBE_EIMS_MAILBOX;
3731                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3732         }
3733
3734         /*
3735          * Now enable all queues; this is done separately to
3736          * allow for handling the extended (beyond 32) MSI-X
3737          * vectors that can be used by the 82599.
3738          */
3739         for (int i = 0; i < adapter->num_queues; i++, que++)
3740                 ixgbe_enable_queue(adapter, que->msix);
3741
3742         IXGBE_WRITE_FLUSH(hw);
3743
3744         return;
3745 } /* ixgbe_enable_intr */
3746
3747 /************************************************************************
3748  * ixgbe_disable_intr
3749  ************************************************************************/
3750 static void
3751 ixgbe_disable_intr(struct adapter *adapter)
3752 {
3753         if (adapter->msix_mem)
3754                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3755         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3756                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3757         } else {
3758                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3759                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3760                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3761         }
3762         IXGBE_WRITE_FLUSH(&adapter->hw);
3763
3764         return;
3765 } /* ixgbe_disable_intr */
3766
3767 /************************************************************************
3768  * ixgbe_legacy_irq - Legacy Interrupt Service routine
3769  ************************************************************************/
3770 static void
3771 ixgbe_legacy_irq(void *arg)
3772 {
3773         struct ix_queue *que = arg;
3774         struct adapter  *adapter = que->adapter;
3775         struct ixgbe_hw *hw = &adapter->hw;
3776         struct ifnet    *ifp = adapter->ifp;
3777         struct tx_ring  *txr = adapter->tx_rings;
3778         bool            more = false;
3779         u32             eicr, eicr_mask;
3780
3781         /* Silicon errata #26 on 82598 */
3782         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3783
3784         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3785
3786         ++que->irqs;
3787         if (eicr == 0) {
3788                 ixgbe_enable_intr(adapter);
3789                 return;
3790         }
3791
3792         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3793                 more = ixgbe_rxeof(que);
3794
3795                 IXGBE_TX_LOCK(txr);
3796                 ixgbe_txeof(txr);
3797                 if (!ixgbe_ring_empty(ifp, txr->br))
3798                         ixgbe_start_locked(ifp, txr);
3799                 IXGBE_TX_UNLOCK(txr);
3800         }
3801
3802         /* Check for fan failure */
3803         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3804                 ixgbe_check_fan_failure(adapter, eicr, true);
3805                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3806         }
3807
3808         /* Link status change */
3809         if (eicr & IXGBE_EICR_LSC)
3810                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
3811
3812         if (ixgbe_is_sfp(hw)) {
3813                 /* Pluggable optics-related interrupt */
3814                 if (hw->mac.type >= ixgbe_mac_X540)
3815                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3816                 else
3817                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3818
3819                 if (eicr & eicr_mask) {
3820                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3821                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
3822                 }
3823
3824                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3825                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3826                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
3827                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3828                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3829                 }
3830         }
3831
3832         /* External PHY interrupt */
3833         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3834             (eicr & IXGBE_EICR_GPI_SDP0_X540))
3835                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
3836
3837         if (more)
3838                 taskqueue_enqueue(que->tq, &que->que_task);
3839         else
3840                 ixgbe_enable_intr(adapter);
3841
3842         return;
3843 } /* ixgbe_legacy_irq */
3844
3845 /************************************************************************
3846  * ixgbe_free_pci_resources
3847  ************************************************************************/
3848 static void
3849 ixgbe_free_pci_resources(struct adapter *adapter)
3850 {
3851         struct ix_queue *que = adapter->queues;
3852         device_t        dev = adapter->dev;
3853         int             rid, memrid;
3854
3855         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3856                 memrid = PCIR_BAR(MSIX_82598_BAR);
3857         else
3858                 memrid = PCIR_BAR(MSIX_82599_BAR);
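        /* memrid selects the BAR holding the MSI-X table, which differs by MAC. */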
3859
3860         /*
3861          * There is a slight possibility of a failure mode
3862          * in attach that will result in entering this function
3863          * before interrupt resources have been initialized, and
3864          * in that case we do not want to execute the loops below.
3865          * We can detect this reliably by the state of the adapter's
3866          * res pointer.
3867          */
3868         if (adapter->res == NULL)
3869                 goto mem;
3870
3871         /*
3872          * Release all MSI-X queue resources.
3873          */
3874         for (int i = 0; i < adapter->num_queues; i++, que++) {
3875                 rid = que->msix + 1;
3876                 if (que->tag != NULL) {
3877                         bus_teardown_intr(dev, que->res, que->tag);
3878                         que->tag = NULL;
3879                 }
3880                 if (que->res != NULL)
3881                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3882         }
3883
3884
3885         if (adapter->tag != NULL) {
3886                 bus_teardown_intr(dev, adapter->res, adapter->tag);
3887                 adapter->tag = NULL;
3888         }
3889
3890         /* Clean the Legacy or Link interrupt last */
3891         if (adapter->res != NULL)
3892                 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3893                     adapter->res);
3894
3895 mem:
3896         if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3897             (adapter->feat_en & IXGBE_FEATURE_MSIX))
3898                 pci_release_msi(dev);
3899
3900         if (adapter->msix_mem != NULL)
3901                 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3902                     adapter->msix_mem);
3903
3904         if (adapter->pci_mem != NULL)
3905                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3906                     adapter->pci_mem);
3907
3908         return;
3909 } /* ixgbe_free_pci_resources */
3910
3911 /************************************************************************
3912  * ixgbe_set_sysctl_value
3913  ************************************************************************/
3914 static void
3915 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3916     const char *description, int *limit, int value)
3917 {
3918         *limit = value;
3919         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3920             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3921             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
3922 } /* ixgbe_set_sysctl_value */
3923
3924 /************************************************************************
3925  * ixgbe_sysctl_flowcntl
3926  *
3927  *   SYSCTL wrapper around setting Flow Control
3928  ************************************************************************/
3929 static int
3930 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
3931 {
3932         struct adapter *adapter;
3933         int            error, fc;
3934
3935         adapter = (struct adapter *)arg1;
3936         fc = adapter->hw.fc.current_mode;
3937
3938         error = sysctl_handle_int(oidp, &fc, 0, req);
3939         if ((error) || (req->newptr == NULL))
3940                 return (error);
3941
3942         /* Don't bother if it's not changed */
3943         if (fc == adapter->hw.fc.current_mode)
3944                 return (0);
3945
3946         return ixgbe_set_flowcntl(adapter, fc);
3947 } /* ixgbe_sysctl_flowcntl */
3948
3949 /************************************************************************
3950  * ixgbe_set_flowcntl - Set flow control
3951  *
3952  *   Flow control values:
3953  *     0 - off
3954  *     1 - rx pause
3955  *     2 - tx pause
3956  *     3 - full
3957  ************************************************************************/
3958 static int
3959 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
3960 {
3961         switch (fc) {
3962         case ixgbe_fc_rx_pause:
3963         case ixgbe_fc_tx_pause:
3964         case ixgbe_fc_full:
3965                 adapter->hw.fc.requested_mode = fc;
3966                 if (adapter->num_queues > 1)
3967                         ixgbe_disable_rx_drop(adapter);
3968                 break;
3969         case ixgbe_fc_none:
3970                 adapter->hw.fc.requested_mode = ixgbe_fc_none;
3971                 if (adapter->num_queues > 1)
3972                         ixgbe_enable_rx_drop(adapter);
3973                 break;
3974         default:
3975                 return (EINVAL);
3976         }
3977
3978         /* Don't autoneg if forcing a value */
3979         adapter->hw.fc.disable_fc_autoneg = TRUE;
3980         ixgbe_fc_enable(&adapter->hw);
3981
3982         return (0);
3983 } /* ixgbe_set_flowcntl */
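
/*
 * Example (editorial, not from the upstream file): assuming the handler
 * above is attached under the usual per-device sysctl node (the exact OID
 * name is an assumption; see the driver's sysctl registration code), full
 * rx/tx pause could be requested from userland with:
 *
 *      # sysctl dev.ix.0.fc=3
 *
 * Forcing any value this way also disables flow-control autonegotiation,
 * as done above via hw.fc.disable_fc_autoneg.
 */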
3984
3985 /************************************************************************
3986  * ixgbe_enable_rx_drop
3987  *
3988  *   Enable the hardware to drop packets when the buffer is
3989  *   full. This is useful with multiqueue, so that no single
3990  *   queue being full stalls the entire RX engine. We only
3991  *   enable this when Multiqueue is enabled AND Flow Control
3992  *   is disabled.
3993  ************************************************************************/
3994 static void
3995 ixgbe_enable_rx_drop(struct adapter *adapter)
3996 {
3997         struct ixgbe_hw *hw = &adapter->hw;
3998         struct rx_ring  *rxr;
3999         u32             srrctl;
4000
4001         for (int i = 0; i < adapter->num_queues; i++) {
4002                 rxr = &adapter->rx_rings[i];
4003                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4004                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4005                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4006         }
4007
4008         /* enable drop for each vf */
4009         for (int i = 0; i < adapter->num_vfs; i++) {
4010                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4011                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4012                     IXGBE_QDE_ENABLE));
4013         }
4014 } /* ixgbe_enable_rx_drop */
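
/*
 * Note (editorial): each IXGBE_QDE write above targets one VF pool -- the
 * pool index is placed in the IXGBE_QDE_IDX_SHIFT field, IXGBE_QDE_WRITE
 * latches the value, and IXGBE_QDE_ENABLE sets drop-on-full for that pool
 * (ixgbe_disable_rx_drop() below simply omits the enable bit).
 */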
4015
4016 /************************************************************************
4017  * ixgbe_disable_rx_drop
4018  ************************************************************************/
4019 static void
4020 ixgbe_disable_rx_drop(struct adapter *adapter)
4021 {
4022         struct ixgbe_hw *hw = &adapter->hw;
4023         struct rx_ring  *rxr;
4024         u32             srrctl;
4025
4026         for (int i = 0; i < adapter->num_queues; i++) {
4027                 rxr = &adapter->rx_rings[i];
4028                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4029                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4030                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4031         }
4032
4033         /* disable drop for each vf */
4034         for (int i = 0; i < adapter->num_vfs; i++) {
4035                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4036                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4037         }
4038 } /* ixgbe_disable_rx_drop */
4039
4040 /************************************************************************
4041  * ixgbe_sysctl_advertise
4042  *
4043  *   SYSCTL wrapper around setting advertised speed
4044  ************************************************************************/
4045 static int
4046 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4047 {
4048         struct adapter *adapter;
4049         int            error, advertise;
4050
4051         adapter = (struct adapter *)arg1;
4052         advertise = adapter->advertise;
4053
4054         error = sysctl_handle_int(oidp, &advertise, 0, req);
4055         if ((error) || (req->newptr == NULL))
4056                 return (error);
4057
4058         return ixgbe_set_advertise(adapter, advertise);
4059 } /* ixgbe_sysctl_advertise */
4060
4061 /************************************************************************
4062  * ixgbe_set_advertise - Control advertised link speed
4063  *
4064  *   Flags:
4065  *     0x1 - advertise 100 Mb
4066  *     0x2 - advertise 1G
4067  *     0x4 - advertise 10G
4068  *     0x8 - advertise 10 Mb (yes, Mb)
4069  ************************************************************************/
4070 static int
4071 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4072 {
4073         device_t         dev;
4074         struct ixgbe_hw  *hw;
4075         ixgbe_link_speed speed = 0;
4076         ixgbe_link_speed link_caps = 0;
4077         s32              err = IXGBE_NOT_IMPLEMENTED;
4078         bool             negotiate = FALSE;
4079
4080         /* Checks to validate new value */
4081         if (adapter->advertise == advertise) /* no change */
4082                 return (0);
4083
4084         dev = adapter->dev;
4085         hw = &adapter->hw;
4086
4087         /* No speed changes for backplane media */
4088         if (hw->phy.media_type == ixgbe_media_type_backplane)
4089                 return (ENODEV);
4090
4091         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4092               (hw->phy.multispeed_fiber))) {
4093                 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4094                 return (EINVAL);
4095         }
4096
4097         if (advertise < 0x1 || advertise > 0xF) {
4098                 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4099                 return (EINVAL);
4100         }
4101
4102         if (hw->mac.ops.get_link_capabilities) {
4103                 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4104                     &negotiate);
4105                 if (err != IXGBE_SUCCESS) {
4106                         device_printf(dev, "Unable to determine supported advertised speeds\n");
4107                         return (ENODEV);
4108                 }
4109         }
4110
4111         /* Set new value and report new advertised mode */
4112         if (advertise & 0x1) {
4113                 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4114                         device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4115                         return (EINVAL);
4116                 }
4117                 speed |= IXGBE_LINK_SPEED_100_FULL;
4118         }
4119         if (advertise & 0x2) {
4120                 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4121                         device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4122                         return (EINVAL);
4123                 }
4124                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4125         }
4126         if (advertise & 0x4) {
4127                 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4128                         device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4129                         return (EINVAL);
4130                 }
4131                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4132         }
4133         if (advertise & 0x8) {
4134                 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4135                         device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4136                         return (EINVAL);
4137                 }
4138                 speed |= IXGBE_LINK_SPEED_10_FULL;
4139         }
4140
4141         hw->mac.autotry_restart = TRUE;
4142         hw->mac.ops.setup_link(hw, speed, TRUE);
4143         adapter->advertise = advertise;
4144
4145         return (0);
4146 } /* ixgbe_set_advertise */
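
/*
 * Example (editorial): the advertise flags OR together, so advertising
 * 1G and 10G only is 0x2 | 0x4 = 6. Assuming the handler above is wired
 * to a per-device sysctl (the OID name is an assumption; see the driver's
 * sysctl registration code):
 *
 *      # sysctl dev.ix.0.advertise_speed=6
 */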
4147
4148 /************************************************************************
4149  * ixgbe_get_advertise - Get current advertised speed settings
4150  *
4151  *   Formatted for sysctl usage.
4152  *   Flags:
4153  *     0x1 - advertise 100 Mb
4154  *     0x2 - advertise 1G
4155  *     0x4 - advertise 10G
4156  *     0x8 - advertise 10 Mb (yes, Mb)
4157  ************************************************************************/
4158 static int
4159 ixgbe_get_advertise(struct adapter *adapter)
4160 {
4161         struct ixgbe_hw  *hw = &adapter->hw;
4162         int              speed;
4163         ixgbe_link_speed link_caps = 0;
4164         s32              err;
4165         bool             negotiate = FALSE;
4166
4167         /*
4168          * Advertised speed means nothing unless it's copper or
4169          * multi-speed fiber
4170          */
4171         if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4172             !(hw->phy.multispeed_fiber))
4173                 return (0);
4174
4175         err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4176         if (err != IXGBE_SUCCESS)
4177                 return (0);
4178
4179         speed =
4180             ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4181             ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4182             ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4183             ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4184
4185         return speed;
4186 } /* ixgbe_get_advertise */
4187
4188 /************************************************************************
4189  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4190  *
4191  *   Control values:
4192  *     0/1 - off / on (use default value of 1000)
4193  *
4194  *     Legal timer values are:
4195  *     50, 100, 250, 500, 1000, 2000, 5000, 10000
4196  *
4197  *     Turning off interrupt moderation will also turn this off.
4198  ************************************************************************/
4199 static int
4200 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4201 {
4202         struct adapter *adapter = (struct adapter *)arg1;
4203         struct ifnet   *ifp = adapter->ifp;
4204         int            error;
4205         u32            newval;
4206
4207         newval = adapter->dmac;
4208         error = sysctl_handle_int(oidp, &newval, 0, req);
4209         if ((error) || (req->newptr == NULL))
4210                 return (error);
4211
4212         switch (newval) {
4213         case 0:
4214                 /* Disabled */
4215                 adapter->dmac = 0;
4216                 break;
4217         case 1:
4218                 /* Enable and use default */
4219                 adapter->dmac = 1000;
4220                 break;
4221         case 50:
4222         case 100:
4223         case 250:
4224         case 500:
4225         case 1000:
4226         case 2000:
4227         case 5000:
4228         case 10000:
4229                 /* Legal values - allow */
4230                 adapter->dmac = newval;
4231                 break;
4232         default:
4233                 /* Do nothing, illegal value */
4234                 return (EINVAL);
4235         }
4236
4237         /* Re-initialize hardware if it's already running */
4238         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4239                 ixgbe_init(adapter);
4240
4241         return (0);
4242 } /* ixgbe_sysctl_dmac */
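
/*
 * Example (editorial): assuming a per-device sysctl attachment (the OID
 * name is an assumption), DMA coalescing could be driven with:
 *
 *      # sysctl dev.ix.0.dmac=1        (enable with the default of 1000)
 *      # sysctl dev.ix.0.dmac=250      (enable with a timer value of 250)
 *      # sysctl dev.ix.0.dmac=0        (disable)
 *
 * Any value outside the legal set listed above returns EINVAL.
 */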
4243
4244 #ifdef IXGBE_DEBUG
4245 /************************************************************************
4246  * ixgbe_sysctl_power_state
4247  *
4248  *   Sysctl to test power states
4249  *   Values:
4250  *     0      - set device to D0
4251  *     3      - set device to D3
4252  *     (none) - get current device power state
4253  ************************************************************************/
4254 static int
4255 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4256 {
4257         struct adapter *adapter = (struct adapter *)arg1;
4258         device_t       dev = adapter->dev;
4259         int            curr_ps, new_ps, error = 0;
4260
4261         curr_ps = new_ps = pci_get_powerstate(dev);
4262
4263         error = sysctl_handle_int(oidp, &new_ps, 0, req);
4264         if ((error) || (req->newptr == NULL))
4265                 return (error);
4266
4267         if (new_ps == curr_ps)
4268                 return (0);
4269
4270         if (new_ps == 3 && curr_ps == 0)
4271                 error = DEVICE_SUSPEND(dev);
4272         else if (new_ps == 0 && curr_ps == 3)
4273                 error = DEVICE_RESUME(dev);
4274         else
4275                 return (EINVAL);
4276
4277         device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4278
4279         return (error);
4280 } /* ixgbe_sysctl_power_state */
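
/*
 * Example (editorial, IXGBE_DEBUG builds only): assuming the handler is
 * attached as a per-device sysctl (the OID name is an assumption), the
 * power state could be exercised with:
 *
 *      # sysctl dev.ix.0.power_state=3      (suspend the device to D3)
 *      # sysctl dev.ix.0.power_state=0      (resume it to D0)
 */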
4281 #endif
4282
4283 /************************************************************************
4284  * ixgbe_sysctl_wol_enable
4285  *
4286  *   Sysctl to enable/disable the WoL capability,
4287  *   if supported by the adapter.
4288  *
4289  *   Values:
4290  *     0 - disabled
4291  *     1 - enabled
4292  ************************************************************************/
4293 static int
4294 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4295 {
4296         struct adapter  *adapter = (struct adapter *)arg1;
4297         struct ixgbe_hw *hw = &adapter->hw;
4298         int             new_wol_enabled;
4299         int             error = 0;
4300
4301         new_wol_enabled = hw->wol_enabled;
4302         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4303         if ((error) || (req->newptr == NULL))
4304                 return (error);
4305         new_wol_enabled = !!(new_wol_enabled);
4306         if (new_wol_enabled == hw->wol_enabled)
4307                 return (0);
4308
4309         if (new_wol_enabled > 0 && !adapter->wol_support)
4310                 return (ENODEV);
4311         else
4312                 hw->wol_enabled = new_wol_enabled;
4313
4314         return (0);
4315 } /* ixgbe_sysctl_wol_enable */
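
/*
 * Example (editorial): assuming a per-device sysctl attachment (the OID
 * name is an assumption), WoL would be toggled with:
 *
 *      # sysctl dev.ix.0.wol_enable=1
 *
 * The request fails with ENODEV on adapters without WoL support.
 */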
4316
4317 /************************************************************************
4318  * ixgbe_sysctl_wufc - Wake Up Filter Control
4319  *
4320  *   Sysctl to enable/disable the types of received packets that
4321  *   will wake the adapter up.
4322  *   Flags:
4323  *     0x1  - Link Status Change
4324  *     0x2  - Magic Packet
4325  *     0x4  - Direct Exact
4326  *     0x8  - Directed Multicast
4327  *     0x10 - Broadcast
4328  *     0x20 - ARP/IPv4 Request Packet
4329  *     0x40 - Direct IPv4 Packet
4330  *     0x80 - Direct IPv6 Packet
4331  *
4332  *   Settings not listed above will cause the sysctl to return an error.
4333  ************************************************************************/
4334 static int
4335 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4336 {
4337         struct adapter *adapter = (struct adapter *)arg1;
4338         int            error = 0;
4339         u32            new_wufc;
4340
4341         new_wufc = adapter->wufc;
4342
4343         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4344         if ((error) || (req->newptr == NULL))
4345                 return (error);
4346         if (new_wufc == adapter->wufc)
4347                 return (0);
4348
4349         if (new_wufc & 0xffffff00)
4350                 return (EINVAL);
4351
4352         new_wufc &= 0xff;
4353         new_wufc |= (0xffffff & adapter->wufc);
4354         adapter->wufc = new_wufc;
4355
4356         return (0);
4357 } /* ixgbe_sysctl_wufc */
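
/*
 * Example (editorial): the flags above OR together, so waking on link
 * status change plus magic packet is 0x1 | 0x2 = 3. Assuming a per-device
 * sysctl attachment (the OID name is an assumption):
 *
 *      # sysctl dev.ix.0.wufc=3
 */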
4358
4359 #ifdef IXGBE_DEBUG
4360 /************************************************************************
4361  * ixgbe_sysctl_print_rss_config
4362  ************************************************************************/
4363 static int
4364 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4365 {
4366         struct adapter  *adapter = (struct adapter *)arg1;
4367         struct ixgbe_hw *hw = &adapter->hw;
4368         device_t        dev = adapter->dev;
4369         struct sbuf     *buf;
4370         int             error = 0, reta_size;
4371         u32             reg;
4372
4373         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4374         if (!buf) {
4375                 device_printf(dev, "Could not allocate sbuf for output.\n");
4376                 return (ENOMEM);
4377         }
4378
4379         /* TODO: use sbufs to make a string to print out */
4380         /* Set multiplier for RETA setup and table size based on MAC */
4381         switch (adapter->hw.mac.type) {
4382         case ixgbe_mac_X550:
4383         case ixgbe_mac_X550EM_x:
4384         case ixgbe_mac_X550EM_a:
4385                 reta_size = 128;
4386                 break;
4387         default:
4388                 reta_size = 32;
4389                 break;
4390         }
4391
4392         /* Print out the redirection table */
4393         sbuf_cat(buf, "\n");
4394         for (int i = 0; i < reta_size; i++) {
4395                 if (i < 32) {
4396                         reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4397                         sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4398                 } else {
4399                         reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4400                         sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4401                 }
4402         }
4403
4404         /* TODO: print more config */
4405
4406         error = sbuf_finish(buf);
4407         if (error)
4408                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4409
4410         sbuf_delete(buf);
4411
4412         return (0);
4413 } /* ixgbe_sysctl_print_rss_config */
4414 #endif /* IXGBE_DEBUG */
4415
4416 /************************************************************************
4417  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4418  *
4419  *   For X552/X557-AT devices using an external PHY
4420  ************************************************************************/
4421 static int
4422 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4423 {
4424         struct adapter  *adapter = (struct adapter *)arg1;
4425         struct ixgbe_hw *hw = &adapter->hw;
4426         u16             reg;
4427
4428         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4429                 device_printf(adapter->dev,
4430                     "Device has no supported external thermal sensor.\n");
4431                 return (ENODEV);
4432         }
4433
4434         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4435             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4436                 device_printf(adapter->dev,
4437                     "Error reading from PHY's current temperature register\n");
4438                 return (EAGAIN);
4439         }
4440
4441         /* Shift temp for output */
4442         reg = reg >> 8;
4443
4444         return (sysctl_handle_int(oidp, NULL, reg, req));
4445 } /* ixgbe_sysctl_phy_temp */
4446
4447 /************************************************************************
4448  * ixgbe_sysctl_phy_overtemp_occurred
4449  *
4450  *   Reports (directly from the PHY) whether the current PHY
4451  *   temperature is over the overtemp threshold.
4452  ************************************************************************/
4453 static int
4454 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4455 {
4456         struct adapter  *adapter = (struct adapter *)arg1;
4457         struct ixgbe_hw *hw = &adapter->hw;
4458         u16             reg;
4459
4460         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4461                 device_printf(adapter->dev,
4462                     "Device has no supported external thermal sensor.\n");
4463                 return (ENODEV);
4464         }
4465
4466         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4467             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4468                 device_printf(adapter->dev,
4469                     "Error reading from PHY's temperature status register\n");
4470                 return (EAGAIN);
4471         }
4472
4473         /* Get occurrence bit */
4474         reg = !!(reg & 0x4000);
4475
4476         return (sysctl_handle_int(oidp, NULL, reg, req));
4477 } /* ixgbe_sysctl_phy_overtemp_occurred */
4478
4479 /************************************************************************
4480  * ixgbe_sysctl_eee_state
4481  *
4482  *   Sysctl to set EEE power saving feature
4483  *   Values:
4484  *     0      - disable EEE
4485  *     1      - enable EEE
4486  *     (none) - get current device EEE state
4487  ************************************************************************/
4488 static int
4489 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4490 {
4491         struct adapter *adapter = (struct adapter *)arg1;
4492         device_t       dev = adapter->dev;
4493         int            curr_eee, new_eee, error = 0;
4494         s32            retval;
4495
4496         curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4497
4498         error = sysctl_handle_int(oidp, &new_eee, 0, req);
4499         if ((error) || (req->newptr == NULL))
4500                 return (error);
4501
4502         /* Nothing to do */
4503         if (new_eee == curr_eee)
4504                 return (0);
4505
4506         /* Not supported */
4507         if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4508                 return (EINVAL);
4509
4510         /* Bounds checking */
4511         if ((new_eee < 0) || (new_eee > 1))
4512                 return (EINVAL);
4513
4514         retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4515         if (retval) {
4516                 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4517                 return (EINVAL);
4518         }
4519
4520         /* Restart auto-neg */
4521         ixgbe_init(adapter);
4522
4523         device_printf(dev, "New EEE state: %d\n", new_eee);
4524
4525         /* Cache new value */
4526         if (new_eee)
4527                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4528         else
4529                 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4530
4531         return (error);
4532 } /* ixgbe_sysctl_eee_state */
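
/*
 * Example (editorial): assuming a per-device sysctl attachment (the OID
 * name is an assumption), EEE could be driven with:
 *
 *      # sysctl dev.ix.0.eee_state=1        (enable EEE, reinitializes)
 *      # sysctl dev.ix.0.eee_state          (query the cached state)
 */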
4533
4534 /************************************************************************
4535  * ixgbe_init_device_features
4536  ************************************************************************/
4537 static void
4538 ixgbe_init_device_features(struct adapter *adapter)
4539 {
4540         adapter->feat_cap = IXGBE_FEATURE_NETMAP
4541                           | IXGBE_FEATURE_RSS
4542                           | IXGBE_FEATURE_MSI
4543                           | IXGBE_FEATURE_MSIX
4544                           | IXGBE_FEATURE_LEGACY_IRQ
4545                           | IXGBE_FEATURE_LEGACY_TX;
4546
4547         /* Set capabilities first... */
4548         switch (adapter->hw.mac.type) {
4549         case ixgbe_mac_82598EB:
4550                 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4551                         adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4552                 break;
4553         case ixgbe_mac_X540:
4554                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4555                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4556                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4557                     (adapter->hw.bus.func == 0))
4558                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4559                 break;
4560         case ixgbe_mac_X550:
4561                 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4562                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4563                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4564                 break;
4565         case ixgbe_mac_X550EM_x:
4566                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4567                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4568                 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4569                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4570                 break;
4571         case ixgbe_mac_X550EM_a:
4572                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4573                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4574                 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4575                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4576                     (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4577                         adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4578                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4579                 }
4580                 break;
4581         case ixgbe_mac_82599EB:
4582                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4583                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4584                 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4585                     (adapter->hw.bus.func == 0))
4586                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4587                 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4588                         adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4589                 break;
4590         default:
4591                 break;
4592         }
4593
4594         /* Enabled by default... */
4595         /* Fan failure detection */
4596         if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4597                 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4598         /* Netmap */
4599         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4600                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4601         /* EEE */
4602         if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4603                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4604         /* Thermal Sensor */
4605         if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4606                 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4607
4608         /* Enabled via global sysctl... */
4609         /* Flow Director */
4610         if (ixgbe_enable_fdir) {
4611                 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4612                         adapter->feat_en |= IXGBE_FEATURE_FDIR;
4613                 else
4614                         device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4615         }
4616         /* Legacy (single queue) transmit */
4617         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4618             ixgbe_enable_legacy_tx)
4619                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4620         /*
4621          * Message Signaled Interrupts - Extended (MSI-X)
4622          * Normal MSI is only enabled if MSI-X calls fail.
4623          */
4624         if (!ixgbe_enable_msix)
4625                 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4626         /* Receive-Side Scaling (RSS) */
4627         if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4628                 adapter->feat_en |= IXGBE_FEATURE_RSS;
4629
4630         /* Disable features with unmet dependencies... */
4631         /* No MSI-X */
4632         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4633                 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4634                 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4635                 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4636                 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4637         }
4638 } /* ixgbe_init_device_features */
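
/*
 * Note (editorial): feat_cap records what the MAC/PHY combination can do,
 * while feat_en is the subset actually enabled after the sysctl/tunable
 * checks above. Other paths in this file gate optional work on feat_en,
 * for example:
 *
 *      if (adapter->feat_en & IXGBE_FEATURE_FDIR)
 *              TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
 */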
4639
4640 /************************************************************************
4641  * ixgbe_probe - Device identification routine
4642  *
4643  *   Determines if the driver should be loaded on
4644  *   adapter based on its PCI vendor/device ID.
4645  *
4646  *   return BUS_PROBE_DEFAULT on success, positive on failure
4647  ************************************************************************/
4648 static int
4649 ixgbe_probe(device_t dev)
4650 {
4651         ixgbe_vendor_info_t *ent;
4652
4653         u16  pci_vendor_id = 0;
4654         u16  pci_device_id = 0;
4655         u16  pci_subvendor_id = 0;
4656         u16  pci_subdevice_id = 0;
4657         char adapter_name[256];
4658
4659         INIT_DEBUGOUT("ixgbe_probe: begin");
4660
4661         pci_vendor_id = pci_get_vendor(dev);
4662         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4663                 return (ENXIO);
4664
4665         pci_device_id = pci_get_device(dev);
4666         pci_subvendor_id = pci_get_subvendor(dev);
4667         pci_subdevice_id = pci_get_subdevice(dev);
4668
4669         ent = ixgbe_vendor_info_array;
4670         while (ent->vendor_id != 0) {
4671                 if ((pci_vendor_id == ent->vendor_id) &&
4672                     (pci_device_id == ent->device_id) &&
4673                     ((pci_subvendor_id == ent->subvendor_id) ||
4674                      (ent->subvendor_id == 0)) &&
4675                     ((pci_subdevice_id == ent->subdevice_id) ||
4676                      (ent->subdevice_id == 0))) {
4677                         sprintf(adapter_name, "%s, Version - %s",
4678                                 ixgbe_strings[ent->index],
4679                                 ixgbe_driver_version);
4680                         device_set_desc_copy(dev, adapter_name);
4681                         ++ixgbe_total_ports;
4682                         return (BUS_PROBE_DEFAULT);
4683                 }
4684                 ent++;
4685         }
4686
4687         return (ENXIO);
4688 } /* ixgbe_probe */
4689
4690
4691 /************************************************************************
4692  * ixgbe_ioctl - Ioctl entry point
4693  *
4694  *   Called when the user wants to configure the interface.
4695  *
4696  *   return 0 on success, positive on failure
4697  ************************************************************************/
4698 static int
4699 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4700 {
4701         struct adapter *adapter = ifp->if_softc;
4702         struct ifreq   *ifr = (struct ifreq *) data;
4703 #if defined(INET) || defined(INET6)
4704         struct ifaddr  *ifa = (struct ifaddr *)data;
4705 #endif
4706         int            error = 0;
4707         bool           avoid_reset = FALSE;
4708
4709         switch (command) {
4710         case SIOCSIFADDR:
4711 #ifdef INET
4712                 if (ifa->ifa_addr->sa_family == AF_INET)
4713                         avoid_reset = TRUE;
4714 #endif
4715 #ifdef INET6
4716                 if (ifa->ifa_addr->sa_family == AF_INET6)
4717                         avoid_reset = TRUE;
4718 #endif
4719                 /*
4720                  * Calling init results in link renegotiation,
4721                  * so we avoid doing it when possible.
4722                  */
4723                 if (avoid_reset) {
4724                         ifp->if_flags |= IFF_UP;
4725                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4726                                 ixgbe_init(adapter);
4727 #ifdef INET
4728                         if (!(ifp->if_flags & IFF_NOARP))
4729                                 arp_ifinit(ifp, ifa);
4730 #endif
4731                 } else
4732                         error = ether_ioctl(ifp, command, data);
4733                 break;
4734         case SIOCSIFMTU:
4735                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4736                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
4737                         error = EINVAL;
4738                 } else {
4739                         IXGBE_CORE_LOCK(adapter);
4740                         ifp->if_mtu = ifr->ifr_mtu;
4741                         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4742                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4743                                 ixgbe_init_locked(adapter);
4744                         ixgbe_recalculate_max_frame(adapter);
4745                         IXGBE_CORE_UNLOCK(adapter);
4746                 }
4747                 break;
4748         case SIOCSIFFLAGS:
4749                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4750                 IXGBE_CORE_LOCK(adapter);
4751                 if (ifp->if_flags & IFF_UP) {
4752                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4753                                 if ((ifp->if_flags ^ adapter->if_flags) &
4754                                     (IFF_PROMISC | IFF_ALLMULTI)) {
4755                                         ixgbe_set_promisc(adapter);
4756                                 }
4757                         } else
4758                                 ixgbe_init_locked(adapter);
4759                 } else
4760                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4761                                 ixgbe_stop(adapter);
4762                 adapter->if_flags = ifp->if_flags;
4763                 IXGBE_CORE_UNLOCK(adapter);
4764                 break;
4765         case SIOCADDMULTI:
4766         case SIOCDELMULTI:
4767                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
4768                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4769                         IXGBE_CORE_LOCK(adapter);
4770                         ixgbe_disable_intr(adapter);
4771                         ixgbe_set_multi(adapter);
4772                         ixgbe_enable_intr(adapter);
4773                         IXGBE_CORE_UNLOCK(adapter);
4774                 }
4775                 break;
4776         case SIOCSIFMEDIA:
4777         case SIOCGIFMEDIA:
4778                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4779                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4780                 break;
4781         case SIOCSIFCAP:
4782         {
4783                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
4784
4785                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4786
4787                 if (!mask)
4788                         break;
4789
4790                 /* HW cannot turn these on/off separately */
4791                 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4792                         ifp->if_capenable ^= IFCAP_RXCSUM;
4793                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4794                 }
4795                 if (mask & IFCAP_TXCSUM)
4796                         ifp->if_capenable ^= IFCAP_TXCSUM;
4797                 if (mask & IFCAP_TXCSUM_IPV6)
4798                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4799                 if (mask & IFCAP_TSO4)
4800                         ifp->if_capenable ^= IFCAP_TSO4;
4801                 if (mask & IFCAP_TSO6)
4802                         ifp->if_capenable ^= IFCAP_TSO6;
4803                 if (mask & IFCAP_LRO)
4804                         ifp->if_capenable ^= IFCAP_LRO;
4805                 if (mask & IFCAP_VLAN_HWTAGGING)
4806                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4807                 if (mask & IFCAP_VLAN_HWFILTER)
4808                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4809                 if (mask & IFCAP_VLAN_HWTSO)
4810                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4811
4812                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4813                         IXGBE_CORE_LOCK(adapter);
4814                         ixgbe_init_locked(adapter);
4815                         IXGBE_CORE_UNLOCK(adapter);
4816                 }
4817                 VLAN_CAPABILITIES(ifp);
4818                 break;
4819         }
4820 #if __FreeBSD_version >= 1100036
4821         case SIOCGI2C:
4822         {
4823                 struct ixgbe_hw *hw = &adapter->hw;
4824                 struct ifi2creq i2c;
4825                 int i;
4826
4827                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4828                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
4829                 if (error != 0)
4830                         break;
4831                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4832                         error = EINVAL;
4833                         break;
4834                 }
4835                 if (i2c.len > sizeof(i2c.data)) {
4836                         error = EINVAL;
4837                         break;
4838                 }
4839
4840                 for (i = 0; i < i2c.len; i++)
4841                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4842                             i2c.dev_addr, &i2c.data[i]);
4843                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
4844                 break;
4845         }
4846 #endif
4847         default:
4848                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4849                 error = ether_ioctl(ifp, command, data);
4850                 break;
4851         }
4852
4853         return (error);
4854 } /* ixgbe_ioctl */
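
/*
 * Note (editorial): the SIOCGI2C case above is the hook used by userland
 * SFF/SFP module readers; "ifconfig -v ix0", for instance, issues SIOCGI2C
 * against I2C addresses 0xA0/0xA2 to dump module identification and
 * diagnostics. The ifconfig behaviour is an assumption about userland,
 * not something defined in this file.
 */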
4855
4856 /************************************************************************
4857  * ixgbe_check_fan_failure
4858  ************************************************************************/
4859 static void
4860 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4861 {
4862         u32 mask;
4863
4864         mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4865             IXGBE_ESDP_SDP1;
4866
4867         if (reg & mask)
4868                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4869 } /* ixgbe_check_fan_failure */
4870
4871 /************************************************************************
4872  * ixgbe_handle_que
4873  ************************************************************************/
4874 static void
4875 ixgbe_handle_que(void *context, int pending)
4876 {
4877         struct ix_queue *que = context;
4878         struct adapter  *adapter = que->adapter;
4879         struct tx_ring  *txr = que->txr;
4880         struct ifnet    *ifp = adapter->ifp;
4881
4882         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4883                 ixgbe_rxeof(que);
4884                 IXGBE_TX_LOCK(txr);
4885                 ixgbe_txeof(txr);
4886                 if (!ixgbe_ring_empty(ifp, txr->br))
4887                         ixgbe_start_locked(ifp, txr);
4888                 IXGBE_TX_UNLOCK(txr);
4889         }
4890
4891         /* Re-enable this interrupt */
4892         if (que->res != NULL)
4893                 ixgbe_enable_queue(adapter, que->msix);
4894         else
4895                 ixgbe_enable_intr(adapter);
4896
4897         return;
4898 } /* ixgbe_handle_que */
4899
4900
4901
4902 /************************************************************************
4903  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4904  ************************************************************************/
4905 static int
4906 ixgbe_allocate_legacy(struct adapter *adapter)
4907 {
4908         device_t        dev = adapter->dev;
4909         struct ix_queue *que = adapter->queues;
4910         struct tx_ring  *txr = adapter->tx_rings;
4911         int             error;
4912
4913         /* We allocate a single interrupt resource */
4914         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4915             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4916         if (adapter->res == NULL) {
4917                 device_printf(dev,
4918                     "Unable to allocate bus resource: interrupt\n");
4919                 return (ENXIO);
4920         }
4921
4922         /*
4923          * Try allocating a fast interrupt and the associated deferred
4924          * processing contexts.
4925          */
4926         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
4927                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
4928         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
4929         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
4930             taskqueue_thread_enqueue, &que->tq);
4931         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
4932             device_get_nameunit(adapter->dev));
4933
4934         /* Tasklets for Link, SFP and Multispeed Fiber */
4935         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
4936         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
4937         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
4938         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
4939         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4940                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
4941         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
4942             taskqueue_thread_enqueue, &adapter->tq);
4943         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
4944             device_get_nameunit(adapter->dev));
4945
4946         if ((error = bus_setup_intr(dev, adapter->res,
4947             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
4948             &adapter->tag)) != 0) {
4949                 device_printf(dev,
4950                     "Failed to register fast interrupt handler: %d\n", error);
4951                 taskqueue_free(que->tq);
4952                 taskqueue_free(adapter->tq);
4953                 que->tq = NULL;
4954                 adapter->tq = NULL;
4955
4956                 return (error);
4957         }
4958         /* For simplicity in the handlers */
4959         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
4960
4961         return (0);
4962 } /* ixgbe_allocate_legacy */
4963
4964
4965 /************************************************************************
4966  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
4967  ************************************************************************/
4968 static int
4969 ixgbe_allocate_msix(struct adapter *adapter)
4970 {
4971         device_t        dev = adapter->dev;
4972         struct ix_queue *que = adapter->queues;
4973         struct tx_ring  *txr = adapter->tx_rings;
4974         int             error, rid, vector = 0;
4975         int             cpu_id = 0;
4976         unsigned int    rss_buckets = 0;
4977         cpuset_t        cpu_mask;
4978
4979         /*
4980          * If we're doing RSS, the number of queues needs to
4981          * match the number of RSS buckets that are configured.
4982          *
4983          * + If there's more queues than RSS buckets, we'll end
4984          *   up with queues that get no traffic.
4985          *
4986          * + If there's more RSS buckets than queues, we'll end
4987          *   up having multiple RSS buckets map to the same queue,
4988          *   so there'll be some contention.
4989          */
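        /*
         * Editorial example: with the (i % rss_buckets) lookup below, 8
         * queues and 4 RSS buckets would pin queues 4-7 to the same CPUs
         * as buckets 0-3; with equal counts, queue i simply follows the
         * CPU of bucket i.
         */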
4990         rss_buckets = rss_getnumbuckets();
4991         if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
4992             (adapter->num_queues != rss_buckets)) {
4993                 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
4994                     __func__, adapter->num_queues, rss_buckets);
4995         }
4996
4997         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
4998                 rid = vector + 1;
4999                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
5000                     RF_SHAREABLE | RF_ACTIVE);
5001                 if (que->res == NULL) {
5002                         device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5003                             vector);
5004                         return (ENXIO);
5005                 }
5006                 /* Set the handler function */
5007                 error = bus_setup_intr(dev, que->res,
5008                     INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5009                     &que->tag);
5010                 if (error) {
5011                         que->res = NULL;
5012                         device_printf(dev, "Failed to register QUE handler\n");
5013                         return (error);
5014                 }
5015 #if __FreeBSD_version >= 800504
5016                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5017 #endif
5018                 que->msix = vector;
5019                 adapter->active_queues |= ((u64)1 << que->msix);
5020
5021                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5022                         /*
5023                          * The queue ID is used as the RSS layer bucket ID.
5024                          * We look up the queue ID -> RSS CPU ID and select
5025                          * that.
5026                          */
5027                         cpu_id = rss_getcpu(i % rss_buckets);
5028                         CPU_SETOF(cpu_id, &cpu_mask);
5029                 } else {
5030                         /*
5031                          * Bind the MSI-X vector, and thus the
5032                          * rings to the corresponding CPU.
5033                          *
5034                          * This just happens to match the default RSS
5035                          * round-robin bucket -> queue -> CPU allocation.
5036                          */
5037                         if (adapter->num_queues > 1)
5038                                 cpu_id = i;
5039                 }
5040                 if (adapter->num_queues > 1)
5041                         bus_bind_intr(dev, que->res, cpu_id);
5042 #ifdef IXGBE_DEBUG
5043                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5044                         device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5045                             cpu_id);
5046                 else
5047                         device_printf(dev, "Bound queue %d to cpu %d\n", i,
5048                             cpu_id);
5049 #endif /* IXGBE_DEBUG */
5050
5051
5052                 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5053                         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5054                             txr);
5055                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5056                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5057                     taskqueue_thread_enqueue, &que->tq);
5058 #if __FreeBSD_version < 1100000
5059                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5060                     device_get_nameunit(adapter->dev), i);
5061 #else
5062                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5063                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5064                             &cpu_mask, "%s (bucket %d)",
5065                             device_get_nameunit(adapter->dev), cpu_id);
5066                 else
5067                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5068                             NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5069                             i);
5070 #endif
5071         }
5072
5073         /* and Link */
5074         adapter->link_rid = vector + 1;
5075         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5076             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5077         if (!adapter->res) {
5078                 device_printf(dev,
5079                     "Unable to allocate bus resource: Link interrupt [%d]\n",
5080                     adapter->link_rid);
5081                 return (ENXIO);
5082         }
5083         /* Set the link handler function */
5084         error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5085             NULL, ixgbe_msix_link, adapter, &adapter->tag);
5086         if (error) {
5087                 adapter->res = NULL;
5088                 device_printf(dev, "Failed to register LINK handler\n");
5089                 return (error);
5090         }
5091 #if __FreeBSD_version >= 800504
5092         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5093 #endif
5094         adapter->vector = vector;
5095         /* Tasklets for Link, SFP and Multispeed Fiber */
5096         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
5097         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
5098         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
5099         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5100                 TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
5101         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
5102         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5103                 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
5104         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
5105             taskqueue_thread_enqueue, &adapter->tq);
5106         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
5107             device_get_nameunit(adapter->dev));
5108
5109         return (0);
5110 } /* ixgbe_allocate_msix */
5111
5112 /************************************************************************
5113  * ixgbe_configure_interrupts
5114  *
5115  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
5116  *   This will also depend on user settings.
5117  ************************************************************************/
5118 static int
5119 ixgbe_configure_interrupts(struct adapter *adapter)
5120 {
5121         device_t dev = adapter->dev;
5122         int      rid, want, queues, msgs;
5123
5124         /* Default to 1 queue if MSI-X setup fails */
5125         adapter->num_queues = 1;
5126
5127         /* Override by tuneable */
5128         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5129                 goto msi;
5130
5131         /* First try MSI-X */
5132         msgs = pci_msix_count(dev);
5133         if (msgs == 0)
5134                 goto msi;
5135         rid = PCIR_BAR(MSIX_82598_BAR);
5136         adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5137             RF_ACTIVE);
5138         if (adapter->msix_mem == NULL) {
5139                 rid += 4;  /* 82599 maps in higher BAR */
5140                 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5141                     &rid, RF_ACTIVE);
5142         }
5143         if (adapter->msix_mem == NULL) {
5144                 /* May not be enabled */
5145                 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5146                 goto msi;
5147         }
5148
5149         /* Figure out a reasonable auto config value */
5150         queues = min(mp_ncpus, msgs - 1);
5151         /* If we're doing RSS, clamp at the number of RSS buckets */
5152         if (adapter->feat_en & IXGBE_FEATURE_RSS)
5153                 queues = min(queues, rss_getnumbuckets());
5154         if (ixgbe_num_queues > queues) {
5155                 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5156                 ixgbe_num_queues = queues;
5157         }
5158
5159         /* Honor a non-zero tuneable; otherwise cap autoconfiguration at 8 */
5160         if (ixgbe_num_queues != 0)
5161                 queues = ixgbe_num_queues;
5162         else
5163                 queues = min(queues, 8);
5164
5165         /* reflect correct sysctl value */
5166         ixgbe_num_queues = queues;
5167
5168         /*
5169          * Want one vector (RX/TX pair) per queue
5170          * plus an additional for Link.
5171          */
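        /* Editorial example: 8 queues would request 9 vectors here. */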
5172         want = queues + 1;
5173         if (msgs >= want)
5174                 msgs = want;
5175         else {
5176                 device_printf(adapter->dev, "MSI-X Configuration Problem, %d vectors available but %d wanted!\n",
5177                     msgs, want);
5178                 goto msi;
5179         }
5180         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5181                 device_printf(adapter->dev,
5182                     "Using MSI-X interrupts with %d vectors\n", msgs);
5183                 adapter->num_queues = queues;
5184                 adapter->feat_en |= IXGBE_FEATURE_MSIX;
5185                 return (0);
5186         }
5187         /*
5188          * MSI-X allocation failed or provided us with
5189          * less vectors than needed. Free MSI-X resources
5190          * and we'll try enabling MSI.
5191          */
5192         pci_release_msi(dev);
5193
5194 msi:
5195         /* Without MSI-X, some features are no longer supported */
5196         adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5197         adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
5198         adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5199         adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
5200
5201         if (adapter->msix_mem != NULL) {
5202                 bus_release_resource(dev, SYS_RES_MEMORY, rid,
5203                     adapter->msix_mem);
5204                 adapter->msix_mem = NULL;
5205         }
5206         msgs = 1;
5207         if (pci_alloc_msi(dev, &msgs) == 0) {
5208                 adapter->feat_en |= IXGBE_FEATURE_MSI;
5209                 adapter->link_rid = 1;
5210                 device_printf(adapter->dev, "Using an MSI interrupt\n");
5211                 return (0);
5212         }
5213
5214         if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5215                 device_printf(adapter->dev,
5216                     "Device does not support legacy interrupts.\n");
5217                 return (1);
5218         }
5219
5220         adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5221         adapter->link_rid = 0;
5222         device_printf(adapter->dev, "Using a Legacy interrupt\n");
5223
5224         return (0);
5225 } /* ixgbe_configure_interrupts */
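
/*
 * Note (editorial): ixgbe_num_queues, ixgbe_enable_msix and the other
 * globals consulted above are exposed as loader tunables/sysctls elsewhere
 * in this file; a line such as
 *
 *      hw.ix.num_queues=4
 *
 * in loader.conf is the usual way to influence this routine, though the
 * exact tunable names are an assumption here -- check the SYSCTL/TUNABLE
 * declarations near the top of the driver for the authoritative names.
 */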
5226
5227
5228 /************************************************************************
5229  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5230  *
5231  *   Done outside of interrupt context since the driver might sleep
5232  ************************************************************************/
5233 static void
5234 ixgbe_handle_link(void *context, int pending)
5235 {
5236         struct adapter  *adapter = context;
5237         struct ixgbe_hw *hw = &adapter->hw;
5238
5239         ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5240         ixgbe_update_link_status(adapter);
5241
5242         /* Re-enable link interrupts */
5243         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5244 } /* ixgbe_handle_link */
5245
5246 /************************************************************************
5247  * ixgbe_rearm_queues
5248  ************************************************************************/
5249 static void
5250 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5251 {
5252         u32 mask;
5253
5254         switch (adapter->hw.mac.type) {
5255         case ixgbe_mac_82598EB:
5256                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5257                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5258                 break;
5259         case ixgbe_mac_82599EB:
5260         case ixgbe_mac_X540:
5261         case ixgbe_mac_X550:
5262         case ixgbe_mac_X550EM_x:
5263         case ixgbe_mac_X550EM_a:
5264                 mask = (queues & 0xFFFFFFFF);
5265                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5266                 mask = (queues >> 32);
5267                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5268                 break;
5269         default:
5270                 break;
5271         }
5272 } /* ixgbe_rearm_queues */
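
/*
 * Note (editorial): the 82598 exposes a single EICS register, so only the
 * queue interrupt bits covered by IXGBE_EIMS_RTX_QUEUE are rearmed there;
 * later MACs split the 64 queue bits across EICS_EX(0) and EICS_EX(1),
 * which is why the mask is written in two 32-bit halves above.
 */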
5273