1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_rss.h"
40 #endif
41
42 #include "ixgbe.h"
43
44 /************************************************************************
45  * Driver version
46  ************************************************************************/
47 char ixgbe_driver_version[] = "3.2.12-k";
48
49
50 /************************************************************************
51  * PCI Device ID Table
52  *
53  *   Used by probe to select devices to load on
54  *   Last field stores an index into ixgbe_strings
55  *   Last entry must be all 0s
56  *
57  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  ************************************************************************/
59 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
60 {
61         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
62         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
63         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
64         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
95         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
96         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
97         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
98         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
99         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
100         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
101         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
102         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
103         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
104         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
105         /* required last entry */
106         {0, 0, 0, 0, 0}
107 };
108
109 /************************************************************************
110  * Table of branding strings
111  ************************************************************************/
112 static char    *ixgbe_strings[] = {
113         "Intel(R) PRO/10GbE PCI-Express Network Driver"
114 };
115
116 /************************************************************************
117  * Function prototypes
118  ************************************************************************/
119 static int      ixgbe_probe(device_t);
120 static int      ixgbe_attach(device_t);
121 static int      ixgbe_detach(device_t);
122 static int      ixgbe_shutdown(device_t);
123 static int      ixgbe_suspend(device_t);
124 static int      ixgbe_resume(device_t);
125 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
126 static void     ixgbe_init(void *);
127 static void     ixgbe_init_locked(struct adapter *);
128 static void     ixgbe_stop(void *);
129 #if __FreeBSD_version >= 1100036
130 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
131 #endif
132 static void     ixgbe_init_device_features(struct adapter *);
133 static void     ixgbe_check_fan_failure(struct adapter *, u32, bool);
134 static void     ixgbe_add_media_types(struct adapter *);
135 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
136 static int      ixgbe_media_change(struct ifnet *);
137 static int      ixgbe_allocate_pci_resources(struct adapter *);
138 static void     ixgbe_get_slot_info(struct adapter *);
139 static int      ixgbe_allocate_msix(struct adapter *);
140 static int      ixgbe_allocate_legacy(struct adapter *);
141 static int      ixgbe_configure_interrupts(struct adapter *);
142 static void     ixgbe_free_pci_resources(struct adapter *);
143 static void     ixgbe_local_timer(void *);
144 static int      ixgbe_setup_interface(device_t, struct adapter *);
145 static void     ixgbe_config_gpie(struct adapter *);
146 static void     ixgbe_config_dmac(struct adapter *);
147 static void     ixgbe_config_delay_values(struct adapter *);
148 static void     ixgbe_config_link(struct adapter *);
149 static void     ixgbe_check_wol_support(struct adapter *);
150 static int      ixgbe_setup_low_power_mode(struct adapter *);
151 static void     ixgbe_rearm_queues(struct adapter *, u64);
152
153 static void     ixgbe_initialize_transmit_units(struct adapter *);
154 static void     ixgbe_initialize_receive_units(struct adapter *);
155 static void     ixgbe_enable_rx_drop(struct adapter *);
156 static void     ixgbe_disable_rx_drop(struct adapter *);
157 static void     ixgbe_initialize_rss_mapping(struct adapter *);
158
159 static void     ixgbe_enable_intr(struct adapter *, bool);
160 static void     ixgbe_disable_intr(struct adapter *, bool);
161 static void     ixgbe_update_stats_counters(struct adapter *);
162 static void     ixgbe_set_promisc(struct adapter *);
163 static void     ixgbe_set_multi(struct adapter *);
164 static void     ixgbe_update_link_status(struct adapter *);
165 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
166 static void     ixgbe_configure_ivars(struct adapter *);
167 static u8       *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
168
169 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
170 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
171 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
172
173 static void     ixgbe_add_device_sysctls(struct adapter *);
174 static void     ixgbe_add_hw_stats(struct adapter *);
175 static int      ixgbe_set_flowcntl(struct adapter *, int);
176 static int      ixgbe_set_advertise(struct adapter *, int);
177 static int      ixgbe_get_advertise(struct adapter *);
178
179 /* Sysctl handlers */
180 static void     ixgbe_set_sysctl_value(struct adapter *, const char *,
181                                        const char *, int *, int);
182 static int      ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
183 static int      ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
184 static int      ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
185 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
186 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
187 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
188 #ifdef IXGBE_DEBUG
189 static int      ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS);
190 static int      ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS);
191 #endif
192 static int      ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS);
193 static int      ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS);
194 static int      ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS);
195 static int      ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
196 static int      ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
197 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
198 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
199
200 /* Support for pluggable optic modules */
201 static bool     ixgbe_sfp_probe(struct adapter *);
202
203 /* Legacy (single vector) interrupt handler */
204 static void     ixgbe_legacy_irq(void *);
205
206 /* The MSI/MSI-X Interrupt handlers */
207 static void     ixgbe_msix_que(void *);
208 static void     ixgbe_msix_link(void *);
209
210 /* Deferred interrupt tasklets */
211 static void     ixgbe_handle_que(void *, int);
212 static void     ixgbe_handle_link(void *);
213 static void     ixgbe_handle_msf(void *);
214 static void     ixgbe_handle_mod(void *);
215 static void     ixgbe_handle_phy(void *);
216 static void     ixgbe_handle_admin_task(void *, int);
217
218
219 /************************************************************************
220  *  FreeBSD Device Interface Entry Points
221  ************************************************************************/
222 static device_method_t ix_methods[] = {
223         /* Device interface */
224         DEVMETHOD(device_probe, ixgbe_probe),
225         DEVMETHOD(device_attach, ixgbe_attach),
226         DEVMETHOD(device_detach, ixgbe_detach),
227         DEVMETHOD(device_shutdown, ixgbe_shutdown),
228         DEVMETHOD(device_suspend, ixgbe_suspend),
229         DEVMETHOD(device_resume, ixgbe_resume),
230 #ifdef PCI_IOV
231         DEVMETHOD(pci_iov_init, ixgbe_init_iov),
232         DEVMETHOD(pci_iov_uninit, ixgbe_uninit_iov),
233         DEVMETHOD(pci_iov_add_vf, ixgbe_add_vf),
234 #endif /* PCI_IOV */
235         DEVMETHOD_END
236 };
237
238 static driver_t ix_driver = {
239         "ix", ix_methods, sizeof(struct adapter),
240 };
241
242 devclass_t ix_devclass;
243 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
244
245 MODULE_DEPEND(ix, pci, 1, 1, 1);
246 MODULE_DEPEND(ix, ether, 1, 1, 1);
247 #ifdef DEV_NETMAP
248 MODULE_DEPEND(ix, netmap, 1, 1, 1);
249 #endif
250
251 /*
252  * TUNEABLE PARAMETERS:
253  */
254
255 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0, "IXGBE driver parameters");
256
257 /*
258  * AIM: Adaptive Interrupt Moderation, which
259  * varies the interrupt rate over time based
260  * on the traffic seen on each interrupt
261  * vector.
262  */
263 static int ixgbe_enable_aim = TRUE;
264 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
265     "Enable adaptive interrupt moderation");
266
267 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
268 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
269     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
270
271 /* How many packets rxeof tries to clean at a time */
272 static int ixgbe_rx_process_limit = 256;
273 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
274     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
275
276 /* How many packets txeof tries to clean at a time */
277 static int ixgbe_tx_process_limit = 256;
278 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
279     &ixgbe_tx_process_limit, 0,
280     "Maximum number of sent packets to process at a time, -1 means unlimited");
281
282 /* Flow control setting, default to full */
283 static int ixgbe_flow_control = ixgbe_fc_full;
284 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
285     &ixgbe_flow_control, 0, "Default flow control used for all adapters");
286
287 /* Advertise Speed, default to 0 (auto) */
288 static int ixgbe_advertise_speed = 0;
289 SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN,
290     &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters");
291
292 /*
293  * Smart speed setting, default to on.
294  * This currently works only as a compile-time
295  * option because it is applied during attach;
296  * set it to 'ixgbe_smart_speed_off' to
297  * disable.
298  */
299 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
300
301 /*
302  * MSI-X should be the default for best performance,
303  * but this allows it to be forced off for testing.
304  */
305 static int ixgbe_enable_msix = 1;
306 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
307     "Enable MSI-X interrupts");
308
309 /*
310  * Number of queues. When set to 0, the
311  * driver autoconfigures based on the
312  * number of CPUs, with a maximum of 8.
313  * This can be overridden manually here.
314  */
315 static int ixgbe_num_queues = 0;
316 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
317     "Number of queues to configure, 0 indicates autoconfigure");
318
319 /*
320  * Number of TX descriptors per ring,
321  * set higher than RX as this seems to
322  * be the better-performing choice.
323  */
324 static int ixgbe_txd = PERFORM_TXD;
325 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
326     "Number of transmit descriptors per queue");
327
328 /* Number of RX descriptors per ring */
329 static int ixgbe_rxd = PERFORM_RXD;
330 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
331     "Number of receive descriptors per queue");
332
333 /*
334  * Setting this to on allows the use of
335  * unsupported SFP+ modules; note that in
336  * doing so you are on your own :)
337  */
338 static int allow_unsupported_sfp = FALSE;
339 SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
340     &allow_unsupported_sfp, 0,
341     "Allow unsupported SFP modules...use at your own risk");
342
343 /*
344  * Not sure if Flow Director is fully baked,
345  * so we'll default to turning it off.
346  */
347 static int ixgbe_enable_fdir = 0;
348 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
349     "Enable Flow Director");
350
351 /* Legacy Transmit (single queue) */
352 static int ixgbe_enable_legacy_tx = 0;
353 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
354     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
355
356 /* Receive-Side Scaling */
357 static int ixgbe_enable_rss = 1;
358 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
359     "Enable Receive-Side Scaling (RSS)");
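
/*
 * Illustrative note (editor addition, not part of the original driver):
 * the knobs above are declared with CTLFLAG_RDTUN, so they are read-only
 * sysctls that take their values from the kernel environment at boot.  A
 * minimal sketch of setting a few of them from /boot/loader.conf:
 *
 *   hw.ix.num_queues=4      # fixed queue count instead of autoconfigure
 *   hw.ix.enable_aim=0      # disable adaptive interrupt moderation
 *   hw.ix.flow_control=0    # ixgbe_fc_none; permits per-queue RX drops
 *
 * The effective values can be inspected at runtime with `sysctl hw.ix`.
 */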
360
361 /* Keep a running tab on the ports for sanity checks */
362 static int ixgbe_total_ports;
363
364 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
365 static int (*ixgbe_ring_empty)(struct ifnet *, struct buf_ring *);
366
367 MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations");
368
369 /************************************************************************
370  * ixgbe_initialize_rss_mapping
371  ************************************************************************/
372 static void
373 ixgbe_initialize_rss_mapping(struct adapter *adapter)
374 {
375         struct ixgbe_hw *hw = &adapter->hw;
376         u32             reta = 0, mrqc, rss_key[10];
377         int             queue_id, table_size, index_mult;
378         int             i, j;
379         u32             rss_hash_config;
380
381         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
382                 /* Fetch the configured RSS key */
383                 rss_getkey((uint8_t *)&rss_key);
384         } else {
385                 /* set up random bits */
386                 arc4rand(&rss_key, sizeof(rss_key), 0);
387         }
388
389         /* Set multiplier for RETA setup and table size based on MAC */
390         index_mult = 0x1;
391         table_size = 128;
392         switch (adapter->hw.mac.type) {
393         case ixgbe_mac_82598EB:
394                 index_mult = 0x11;
395                 break;
396         case ixgbe_mac_X550:
397         case ixgbe_mac_X550EM_x:
398         case ixgbe_mac_X550EM_a:
399                 table_size = 512;
400                 break;
401         default:
402                 break;
403         }
404
405         /* Set up the redirection table */
406         for (i = 0, j = 0; i < table_size; i++, j++) {
407                 if (j == adapter->num_queues)
408                         j = 0;
409
410                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
411                         /*
412                          * Fetch the RSS bucket id for the given indirection
413                          * entry. Cap it at the number of configured buckets
414                          * (which is num_queues.)
415                          */
416                         queue_id = rss_get_indirection_to_bucket(i);
417                         queue_id = queue_id % adapter->num_queues;
418                 } else
419                         queue_id = (j * index_mult);
420
421                 /*
422                  * The low 8 bits are for hash value (n+0);
423                  * The next 8 bits are for hash value (n+1), etc.
424                  */
425                 reta = reta >> 8;
426                 reta = reta | (((uint32_t)queue_id) << 24);
427                 if ((i & 3) == 3) {
428                         if (i < 128)
429                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
430                         else
431                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
432                                     reta);
433                         reta = 0;
434                 }
435         }
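
        /*
         * Worked example (editor addition): with four queues and
         * index_mult == 1, the loop above packs consecutive 8-bit queue
         * ids into each 32-bit RETA register, least-significant byte
         * first, so after the first four iterations it writes:
         *
         *   RETA(0) = 0x03020100   (hash index 0 -> queue 0 in the low
         *                           byte, hash index 3 -> queue 3 in the
         *                           high byte)
         *
         * Indices 128 and above go to the extended ERETA registers, which
         * exist only on the MACs using the 512-entry table.
         */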
436
437         /* Now fill our hash function seeds */
438         for (i = 0; i < 10; i++)
439                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
440
441         /* Perform hash on these packet types */
442         if (adapter->feat_en & IXGBE_FEATURE_RSS)
443                 rss_hash_config = rss_gethashconfig();
444         else {
445                 /*
446                  * Disable UDP - IP fragments aren't currently being handled
447                  * and so we end up with a mix of 2-tuple and 4-tuple
448                  * traffic.
449                  */
450                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
451                                 | RSS_HASHTYPE_RSS_TCP_IPV4
452                                 | RSS_HASHTYPE_RSS_IPV6
453                                 | RSS_HASHTYPE_RSS_TCP_IPV6
454                                 | RSS_HASHTYPE_RSS_IPV6_EX
455                                 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
456         }
457
458         mrqc = IXGBE_MRQC_RSSEN;
459         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
460                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
461         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
462                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
463         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
464                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
465         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
466                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
467         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
468                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
469         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
470                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
471         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
472                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
473         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
474                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, but not supported\n",
475                     __func__);
476         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
477                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
478         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
479                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
480         mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
481         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
482 } /* ixgbe_initialize_rss_mapping */
483
484 /************************************************************************
485  * ixgbe_initialize_receive_units - Setup receive registers and features.
486  ************************************************************************/
487 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
488
489 static void
490 ixgbe_initialize_receive_units(struct adapter *adapter)
491 {
492         struct rx_ring  *rxr = adapter->rx_rings;
493         struct ixgbe_hw *hw = &adapter->hw;
494         struct ifnet    *ifp = adapter->ifp;
495         int             i, j;
496         u32             bufsz, fctrl, srrctl, rxcsum;
497         u32             hlreg;
498
499         /*
500          * Make sure receives are disabled while
501          * setting up the descriptor ring
502          */
503         ixgbe_disable_rx(hw);
504
505         /* Enable broadcasts */
506         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
507         fctrl |= IXGBE_FCTRL_BAM;
508         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
509                 fctrl |= IXGBE_FCTRL_DPF;
510                 fctrl |= IXGBE_FCTRL_PMCF;
511         }
512         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
513
514         /* Set for Jumbo Frames? */
515         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
516         if (ifp->if_mtu > ETHERMTU)
517                 hlreg |= IXGBE_HLREG0_JUMBOEN;
518         else
519                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
520
521 #ifdef DEV_NETMAP
522         /* CRC stripping is conditional in Netmap */
523         if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
524             (ifp->if_capenable & IFCAP_NETMAP) &&
525             !ix_crcstrip)
526                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
527         else
528 #endif /* DEV_NETMAP */
529                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
530
531         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
532
533         bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
534             IXGBE_SRRCTL_BSIZEPKT_SHIFT;
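
        /*
         * Worked example (editor addition): BSIZEPKT_ROUNDUP rounds the
         * mbuf size up to the SRRCTL packet-buffer granularity, assumed
         * here to be 1 KB (IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10).  With
         * standard 2 KB clusters:
         *
         *   bufsz = (2048 + 1023) >> 10 = 2    (a 2 KB receive buffer)
         *
         * and with 4 KB jumbo clusters the field would encode 4.
         */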
535
536         for (i = 0; i < adapter->num_queues; i++, rxr++) {
537                 u64 rdba = rxr->rxdma.dma_paddr;
538                 j = rxr->me;
539
540                 /* Setup the Base and Length of the Rx Descriptor Ring */
541                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
542                     (rdba & 0x00000000ffffffffULL));
543                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
544                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
545                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
546
547                 /* Set up the SRRCTL register */
548                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
549                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
550                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
551                 srrctl |= bufsz;
552                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
553
554                 /*
555                  * Set DROP_EN iff we have no flow control and >1 queue.
556                  * Note that srrctl was cleared shortly before during reset,
557                  * so we do not need to clear the bit, but do it just in case
558                  * this code is moved elsewhere.
559                  */
560                 if (adapter->num_queues > 1 &&
561                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
562                         srrctl |= IXGBE_SRRCTL_DROP_EN;
563                 } else {
564                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
565                 }
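
                /*
                 * Note (editor addition): with flow control off, letting
                 * the hardware drop on a full ring keeps one congested
                 * queue from backing up the shared packet buffer and
                 * stalling the other queues, which is why DROP_EN is set
                 * only in the multi-queue, no-flow-control case tested
                 * above.
                 */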
566
567                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
568
569                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
570                 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
571                 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
572
573                 /* Set the driver rx tail address */
574                 rxr->tail =  IXGBE_RDT(rxr->me);
575         }
576
577         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
578                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
579                             | IXGBE_PSRTYPE_UDPHDR
580                             | IXGBE_PSRTYPE_IPV4HDR
581                             | IXGBE_PSRTYPE_IPV6HDR;
582                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
583         }
584
585         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
586
587         ixgbe_initialize_rss_mapping(adapter);
588
589         if (adapter->num_queues > 1) {
590                 /* RSS and RX IPP Checksum are mutually exclusive */
591                 rxcsum |= IXGBE_RXCSUM_PCSD;
592         }
593
594         if (ifp->if_capenable & IFCAP_RXCSUM)
595                 rxcsum |= IXGBE_RXCSUM_PCSD;
596
597         /* This is useful for calculating UDP/IP fragment checksums */
598         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
599                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
600
601         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
602
603         return;
604 } /* ixgbe_initialize_receive_units */
605
606 /************************************************************************
607  * ixgbe_initialize_transmit_units - Enable transmit units.
608  ************************************************************************/
609 static void
610 ixgbe_initialize_transmit_units(struct adapter *adapter)
611 {
612         struct tx_ring  *txr = adapter->tx_rings;
613         struct ixgbe_hw *hw = &adapter->hw;
614
615         /* Setup the Base and Length of the Tx Descriptor Ring */
616         for (int i = 0; i < adapter->num_queues; i++, txr++) {
617                 u64 tdba = txr->txdma.dma_paddr;
618                 u32 txctrl = 0;
619                 int j = txr->me;
620
621                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
622                     (tdba & 0x00000000ffffffffULL));
623                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
624                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
625                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
626
627                 /* Setup the HW Tx Head and Tail descriptor pointers */
628                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
629                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
630
631                 /* Cache the tail address */
632                 txr->tail = IXGBE_TDT(j);
633
634                 /* Disable Head Writeback */
635                 /*
636                  * Note: for X550 series devices, these registers are actually
637                  * prefixed with TPH_ instead of DCA_, but the addresses and
638                  * fields remain the same.
639                  */
640                 switch (hw->mac.type) {
641                 case ixgbe_mac_82598EB:
642                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
643                         break;
644                 default:
645                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
646                         break;
647                 }
648                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
649                 switch (hw->mac.type) {
650                 case ixgbe_mac_82598EB:
651                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
652                         break;
653                 default:
654                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
655                         break;
656                 }
657
658         }
659
660         if (hw->mac.type != ixgbe_mac_82598EB) {
661                 u32 dmatxctl, rttdcs;
662
663                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
664                 dmatxctl |= IXGBE_DMATXCTL_TE;
665                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
666                 /* Disable arbiter to set MTQC */
667                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
668                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
669                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
670                 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
671                     ixgbe_get_mtqc(adapter->iov_mode));
672                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
673                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
674         }
675
676         return;
677 } /* ixgbe_initialize_transmit_units */
678
679 /************************************************************************
680  * ixgbe_attach - Device initialization routine
681  *
682  *   Called when the driver is being loaded.
683  *   Identifies the type of hardware, allocates all resources
684  *   and initializes the hardware.
685  *
686  *   return 0 on success, positive on failure
687  ************************************************************************/
688 static int
689 ixgbe_attach(device_t dev)
690 {
691         struct adapter  *adapter;
692         struct ixgbe_hw *hw;
693         int             error = 0;
694         u32             ctrl_ext;
695
696         INIT_DEBUGOUT("ixgbe_attach: begin");
697
698         /* Allocate, clear, and link in our adapter structure */
699         adapter = device_get_softc(dev);
700         adapter->hw.back = adapter;
701         adapter->dev = dev;
702         hw = &adapter->hw;
703
704         /* Core Lock Init */
705         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
706
707         /* Set up the timer callout */
708         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
709
710         /* Determine hardware revision */
711         hw->vendor_id = pci_get_vendor(dev);
712         hw->device_id = pci_get_device(dev);
713         hw->revision_id = pci_get_revid(dev);
714         hw->subsystem_vendor_id = pci_get_subvendor(dev);
715         hw->subsystem_device_id = pci_get_subdevice(dev);
716
717         /*
718          * Make sure BUSMASTER is set
719          */
720         pci_enable_busmaster(dev);
721
722         /* Do base PCI setup - map BAR0 */
723         if (ixgbe_allocate_pci_resources(adapter)) {
724                 device_printf(dev, "Allocation of PCI resources failed\n");
725                 error = ENXIO;
726                 goto err_out;
727         }
728
729         /* let hardware know driver is loaded */
730         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
731         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
732         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
733
734         hw->allow_unsupported_sfp = allow_unsupported_sfp;
735
736         /*
737          * Initialize the shared code
738          */
739         if (ixgbe_init_shared_code(hw)) {
740                 device_printf(dev, "Unable to initialize the shared code\n");
741                 error = ENXIO;
742                 goto err_out;
743         }
744
745         if (hw->mbx.ops.init_params)
746                 hw->mbx.ops.init_params(hw);
747
748
749         /* Pick up the 82599 settings */
750         if (hw->mac.type != ixgbe_mac_82598EB) {
751                 hw->phy.smart_speed = ixgbe_smart_speed;
752                 adapter->num_segs = IXGBE_82599_SCATTER;
753         } else
754                 adapter->num_segs = IXGBE_82598_SCATTER;
755
756         ixgbe_init_device_features(adapter);
757
758         if (ixgbe_configure_interrupts(adapter)) {
759                 error = ENXIO;
760                 goto err_out;
761         }
762
763         /* Allocate multicast array memory. */
764         adapter->mta = malloc(sizeof(*adapter->mta) *
765             MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
766         if (adapter->mta == NULL) {
767                 device_printf(dev, "Can not allocate multicast setup array\n");
768                 error = ENOMEM;
769                 goto err_out;
770         }
771
772         /* Enable WoL (if supported) */
773         ixgbe_check_wol_support(adapter);
774
775         /* Register for VLAN events */
776         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
777             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
778         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
779             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
780
781         /* Verify adapter fan is still functional (if applicable) */
782         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
783                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
784                 ixgbe_check_fan_failure(adapter, esdp, FALSE);
785         }
786
787         /* Ensure SW/FW semaphore is free */
788         ixgbe_init_swfw_semaphore(hw);
789
790         /* Enable EEE power saving */
791         if (adapter->feat_en & IXGBE_FEATURE_EEE)
792                 hw->mac.ops.setup_eee(hw, TRUE);
793
794         /* Set an initial default flow control value */
795         hw->fc.requested_mode = ixgbe_flow_control;
796
797         /* Sysctls for limiting the amount of work done in the taskqueues */
798         ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
799             "max number of rx packets to process",
800             &adapter->rx_process_limit, ixgbe_rx_process_limit);
801
802         ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
803             "max number of tx packets to process",
804             &adapter->tx_process_limit, ixgbe_tx_process_limit);
805
806         /* Do descriptor calc and sanity checks */
807         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
808             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
809                 device_printf(dev, "TXD config issue, using default!\n");
810                 adapter->num_tx_desc = DEFAULT_TXD;
811         } else
812                 adapter->num_tx_desc = ixgbe_txd;
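
        /*
         * Worked example (editor addition): each advanced descriptor is
         * 16 bytes, so with the customary DBA_ALIGN of 128 (an assumption
         * based on the usual ixgbe definitions) the ring size must be a
         * multiple of 8 descriptors:
         *
         *   2048 * 16 = 32768 bytes   -> multiple of 128, accepted
         *   1028 * 16 = 16448 bytes   -> not a multiple of 128, so the
         *                                driver falls back to DEFAULT_TXD
         */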
813
814         /*
815          * With many RX rings it is easy to exceed the
816          * system mbuf allocation. Tuning nmbclusters
817          * can alleviate this.
818          */
819         if (nmbclusters > 0) {
820                 int s;
821                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
822                 if (s > nmbclusters) {
823                         device_printf(dev, "RX Descriptors exceed system mbuf max, using default instead!\n");
824                         ixgbe_rxd = DEFAULT_RXD;
825                 }
826         }
827
828         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
829             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
830                 device_printf(dev, "RXD config issue, using default!\n");
831                 adapter->num_rx_desc = DEFAULT_RXD;
832         } else
833                 adapter->num_rx_desc = ixgbe_rxd;
834
835         /* Allocate our TX/RX Queues */
836         if (ixgbe_allocate_queues(adapter)) {
837                 error = ENOMEM;
838                 goto err_out;
839         }
840
841         hw->phy.reset_if_overtemp = TRUE;
842         error = ixgbe_reset_hw(hw);
843         hw->phy.reset_if_overtemp = FALSE;
844         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
845                 /*
846                  * No optics in this port, set up
847                  * so the timer routine will probe
848                  * for later insertion.
849                  */
850                 adapter->sfp_probe = TRUE;
851                 error = IXGBE_SUCCESS;
852         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
853                 device_printf(dev, "Unsupported SFP+ module detected!\n");
854                 error = EIO;
855                 goto err_late;
856         } else if (error) {
857                 device_printf(dev, "Hardware initialization failed\n");
858                 error = EIO;
859                 goto err_late;
860         }
861
862         /* Make sure we have a good EEPROM before we read from it */
863         if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
864                 device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
865                 error = EIO;
866                 goto err_late;
867         }
868
869         /* Setup OS specific network interface */
870         if (ixgbe_setup_interface(dev, adapter) != 0)
871                 goto err_late;
872
873         if (adapter->feat_en & IXGBE_FEATURE_MSIX)
874                 error = ixgbe_allocate_msix(adapter);
875         else
876                 error = ixgbe_allocate_legacy(adapter);
877         if (error)
878                 goto err_late;
879
880         error = ixgbe_start_hw(hw);
881         switch (error) {
882         case IXGBE_ERR_EEPROM_VERSION:
883                 device_printf(dev, "This device is a pre-production adapter/LOM.  Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
884                 break;
885         case IXGBE_ERR_SFP_NOT_SUPPORTED:
886                 device_printf(dev, "Unsupported SFP+ Module\n");
887                 error = EIO;
888                 goto err_late;
889         case IXGBE_ERR_SFP_NOT_PRESENT:
890                 device_printf(dev, "No SFP+ Module found\n");
891                 /* falls thru */
892         default:
893                 break;
894         }
895
896         /* Enable the optics for 82599 SFP+ fiber */
897         ixgbe_enable_tx_laser(hw);
898
899         /* Enable power to the phy. */
900         ixgbe_set_phy_power(hw, TRUE);
901
902         /* Initialize statistics */
903         ixgbe_update_stats_counters(adapter);
904
905         /* Check PCIE slot type/speed/width */
906         ixgbe_get_slot_info(adapter);
907
908         /*
909          * Do time init and sysctl init here, but
910          * only on the first port of a bypass adapter.
911          */
912         ixgbe_bypass_init(adapter);
913
914         /* Set an initial dmac value */
915         adapter->dmac = 0;
916         /* Set initial advertised speeds (if applicable) */
917         adapter->advertise = ixgbe_get_advertise(adapter);
918
919         if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
920                 ixgbe_define_iov_schemas(dev, &error);
921
922         /* Add sysctls */
923         ixgbe_add_device_sysctls(adapter);
924         ixgbe_add_hw_stats(adapter);
925
926         /* For Netmap */
927         adapter->init_locked = ixgbe_init_locked;
928         adapter->stop_locked = ixgbe_stop;
929
930         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
931                 ixgbe_netmap_attach(adapter);
932
933         /* Initialize Admin Task */
934         TASK_INIT(&adapter->admin_task, 0, ixgbe_handle_admin_task, adapter);
935
936         /* Initialize task queue */
937         adapter->tq = taskqueue_create_fast("ixgbe_admin", M_NOWAIT,
938             taskqueue_thread_enqueue, &adapter->tq);
939         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s admintaskq",
940             device_get_nameunit(adapter->dev));
941
942         INIT_DEBUGOUT("ixgbe_attach: end");
943
944         return (0);
945
946 err_late:
947         ixgbe_free_transmit_structures(adapter);
948         ixgbe_free_receive_structures(adapter);
949         free(adapter->queues, M_DEVBUF);
950 err_out:
951         if (adapter->ifp != NULL)
952                 if_free(adapter->ifp);
953         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
954         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
955         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
956         ixgbe_free_pci_resources(adapter);
957         free(adapter->mta, M_IXGBE);
958         IXGBE_CORE_LOCK_DESTROY(adapter);
959
960         return (error);
961 } /* ixgbe_attach */
962
963 /************************************************************************
964  * ixgbe_check_wol_support
965  *
966  *   Checks whether the adapter's ports are capable of
967  *   Wake On LAN by reading the adapter's NVM.
968  *
969  *   Sets each port's hw->wol_enabled value depending
970  *   on the value read here.
971  ************************************************************************/
972 static void
973 ixgbe_check_wol_support(struct adapter *adapter)
974 {
975         struct ixgbe_hw *hw = &adapter->hw;
976         u16             dev_caps = 0;
977
978         /* Find out WoL support for port */
979         adapter->wol_support = hw->wol_enabled = 0;
980         ixgbe_get_device_caps(hw, &dev_caps);
981         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
982             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
983              hw->bus.func == 0))
984                 adapter->wol_support = hw->wol_enabled = 1;
985
986         /* Save initial wake up filter configuration */
987         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
988
989         return;
990 } /* ixgbe_check_wol_support */
991
992 /************************************************************************
993  * ixgbe_setup_interface
994  *
995  *   Setup networking device structure and register an interface.
996  ************************************************************************/
997 static int
998 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
999 {
1000         struct ifnet *ifp;
1001
1002         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1003
1004         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1005         if (ifp == NULL) {
1006                 device_printf(dev, "can not allocate ifnet structure\n");
1007                 return (-1);
1008         }
1009         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1010         ifp->if_baudrate = IF_Gbps(10);
1011         ifp->if_init = ixgbe_init;
1012         ifp->if_softc = adapter;
1013         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1014         ifp->if_ioctl = ixgbe_ioctl;
1015 #if __FreeBSD_version >= 1100036
1016         if_setgetcounterfn(ifp, ixgbe_get_counter);
1017 #endif
1018 #if __FreeBSD_version >= 1100045
1019         /* TSO parameters */
1020         ifp->if_hw_tsomax = 65518;
1021         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1022         ifp->if_hw_tsomaxsegsize = 2048;
1023 #endif
1024         if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1025                 ifp->if_start = ixgbe_legacy_start;
1026                 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1027                 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
1028                 IFQ_SET_READY(&ifp->if_snd);
1029                 ixgbe_start_locked = ixgbe_legacy_start_locked;
1030                 ixgbe_ring_empty = ixgbe_legacy_ring_empty;
1031         } else {
1032                 ifp->if_transmit = ixgbe_mq_start;
1033                 ifp->if_qflush = ixgbe_qflush;
1034                 ixgbe_start_locked = ixgbe_mq_start_locked;
1035                 ixgbe_ring_empty = drbr_empty;
1036         }
1037
1038         ether_ifattach(ifp, adapter->hw.mac.addr);
1039
1040         adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1041
1042         /*
1043          * Tell the upper layer(s) we support long frames.
1044          */
1045         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1046
1047         /* Set capability flags */
1048         ifp->if_capabilities |= IFCAP_HWCSUM
1049                              |  IFCAP_HWCSUM_IPV6
1050                              |  IFCAP_TSO
1051                              |  IFCAP_LRO
1052                              |  IFCAP_VLAN_HWTAGGING
1053                              |  IFCAP_VLAN_HWTSO
1054                              |  IFCAP_VLAN_HWCSUM
1055                              |  IFCAP_JUMBO_MTU
1056                              |  IFCAP_VLAN_MTU
1057                              |  IFCAP_HWSTATS;
1058
1059         /* Enable the above capabilities by default */
1060         ifp->if_capenable = ifp->if_capabilities;
1061
1062         /*
1063          * Do not turn this on by default: if VLANs are
1064          * created on another pseudo device (e.g. lagg),
1065          * VLAN events are not passed through, breaking
1066          * operation, but with HW FILTER off it works. If
1067          * you use VLANs directly on the ixgbe driver, you
1068          * can enable this and get full hardware tag filtering.
1069          */
1070         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
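
        /*
         * Usage sketch (editor addition): the capability is advertised but
         * left disabled by default, so an administrator can opt in per
         * interface at runtime, e.g. (interface name is illustrative):
         *
         *   ifconfig ix0 vlanhwfilter     # enable hardware VLAN filtering
         *   ifconfig ix0 -vlanhwfilter    # turn it back off
         */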
1071
1072         /*
1073          * Specify the media types supported by this adapter and register
1074          * callbacks to update media and link information
1075          */
1076         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1077             ixgbe_media_status);
1078
1079         adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1080         ixgbe_add_media_types(adapter);
1081
1082         /* Set autoselect media by default */
1083         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
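
        /*
         * Usage sketch (editor addition): the media types registered above
         * can be listed and selected with ifconfig, e.g.:
         *
         *   ifconfig -m ix0                    # list supported media
         *   ifconfig ix0 media autoselect      # the default chosen here
         *   ifconfig ix0 media 10Gbase-SR      # force a specific media type
         */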
1084
1085         return (0);
1086 } /* ixgbe_setup_interface */
1087
1088 #if __FreeBSD_version >= 1100036
1089 /************************************************************************
1090  * ixgbe_get_counter
1091  ************************************************************************/
1092 static uint64_t
1093 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
1094 {
1095         struct adapter *adapter;
1096         struct tx_ring *txr;
1097         uint64_t       rv;
1098
1099         adapter = if_getsoftc(ifp);
1100
1101         switch (cnt) {
1102         case IFCOUNTER_IPACKETS:
1103                 return (adapter->ipackets);
1104         case IFCOUNTER_OPACKETS:
1105                 return (adapter->opackets);
1106         case IFCOUNTER_IBYTES:
1107                 return (adapter->ibytes);
1108         case IFCOUNTER_OBYTES:
1109                 return (adapter->obytes);
1110         case IFCOUNTER_IMCASTS:
1111                 return (adapter->imcasts);
1112         case IFCOUNTER_OMCASTS:
1113                 return (adapter->omcasts);
1114         case IFCOUNTER_COLLISIONS:
1115                 return (0);
1116         case IFCOUNTER_IQDROPS:
1117                 return (adapter->iqdrops);
1118         case IFCOUNTER_OQDROPS:
1119                 rv = 0;
1120                 txr = adapter->tx_rings;
1121                 for (int i = 0; i < adapter->num_queues; i++, txr++)
1122                         rv += txr->br->br_drops;
1123                 return (rv);
1124         case IFCOUNTER_IERRORS:
1125                 return (adapter->ierrors);
1126         default:
1127                 return (if_get_counter_default(ifp, cnt));
1128         }
1129 } /* ixgbe_get_counter */
1130 #endif
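
/*
 * Note (editor addition): on FreeBSD 11 and later these counters are what
 * userland tools such as `netstat -I ix0 -b` report for the interface, via
 * the if_get_counter method installed in ixgbe_setup_interface().
 */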
1131
1132 /************************************************************************
1133  * ixgbe_add_media_types
1134  ************************************************************************/
1135 static void
1136 ixgbe_add_media_types(struct adapter *adapter)
1137 {
1138         struct ixgbe_hw *hw = &adapter->hw;
1139         device_t        dev = adapter->dev;
1140         u64             layer;
1141
1142         layer = adapter->phy_layer;
1143
1144         /* Media types with matching FreeBSD media defines */
1145         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1146                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1147         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1148                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1149         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1150                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1151         if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
1152                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1153
1154         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1155             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1156                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0,
1157                     NULL);
1158
1159         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1160                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1161                 if (hw->phy.multispeed_fiber)
1162                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0,
1163                             NULL);
1164         }
1165         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1166                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1167                 if (hw->phy.multispeed_fiber)
1168                         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0,
1169                             NULL);
1170         } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1171                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1172         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1173                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1174
1175 #ifdef IFM_ETH_XTYPE
1176         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1177                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1178         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1179                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1180         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1181                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1182         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1183                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1184 #else
1185         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1186                 device_printf(dev, "Media supported: 10GbaseKR\n");
1187                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
1188                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1189         }
1190         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1191                 device_printf(dev, "Media supported: 10GbaseKX4\n");
1192                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
1193                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1194         }
1195         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1196                 device_printf(dev, "Media supported: 1000baseKX\n");
1197                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
1198                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1199         }
1200         if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1201                 device_printf(dev, "Media supported: 2500baseKX\n");
1202                 device_printf(dev, "2500baseKX mapped to 2500baseSX\n");
1203                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
1204         }
1205 #endif
1206         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1207                 device_printf(dev, "Media supported: 1000baseBX\n");
1208
1209         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1210                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1211                     0, NULL);
1212                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1213         }
1214
1215         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1216 } /* ixgbe_add_media_types */
1217
1218 /************************************************************************
1219  * ixgbe_is_sfp
1220  ************************************************************************/
1221 static inline bool
1222 ixgbe_is_sfp(struct ixgbe_hw *hw)
1223 {
1224         switch (hw->mac.type) {
1225         case ixgbe_mac_82598EB:
1226                 if (hw->phy.type == ixgbe_phy_nl)
1227                         return TRUE;
1228                 return FALSE;
1229         case ixgbe_mac_82599EB:
1230                 switch (hw->mac.ops.get_media_type(hw)) {
1231                 case ixgbe_media_type_fiber:
1232                 case ixgbe_media_type_fiber_qsfp:
1233                         return TRUE;
1234                 default:
1235                         return FALSE;
1236                 }
1237         case ixgbe_mac_X550EM_x:
1238         case ixgbe_mac_X550EM_a:
1239                 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1240                         return TRUE;
1241                 return FALSE;
1242         default:
1243                 return FALSE;
1244         }
1245 } /* ixgbe_is_sfp */
1246
1247 /************************************************************************
1248  * ixgbe_config_link
1249  ************************************************************************/
1250 static void
1251 ixgbe_config_link(struct adapter *adapter)
1252 {
1253         struct ixgbe_hw *hw = &adapter->hw;
1254         u32             autoneg, err = 0;
1255         bool            sfp, negotiate;
1256
1257         sfp = ixgbe_is_sfp(hw);
1258
1259         if (sfp) {
1260                 if (hw->phy.multispeed_fiber) {
1261                         hw->mac.ops.setup_sfp(hw);
1262                         ixgbe_enable_tx_laser(hw);
1263                         adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
1264                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
1265                 } else {
1266                         adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
1267                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
1268                 }
1269         } else {
1270                 if (hw->mac.ops.check_link)
1271                         err = ixgbe_check_link(hw, &adapter->link_speed,
1272                             &adapter->link_up, FALSE);
1273                 if (err)
1274                         goto out;
1275                 autoneg = hw->phy.autoneg_advertised;
1276                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
1277                         err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1278                             &negotiate);
1279                 if (err)
1280                         goto out;
1281                 if (hw->mac.ops.setup_link)
1282                         err = hw->mac.ops.setup_link(hw, autoneg,
1283                             adapter->link_up);
1284         }
1285 out:
1286
1287         return;
1288 } /* ixgbe_config_link */
1289
1290 /************************************************************************
1291  * ixgbe_update_stats_counters - Update board statistics counters.
1292  ************************************************************************/
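/*
 * On 82599 and later MACs the large octet counters are read as a
 * low/high register pair and combined, e.g. gorc += GORCL +
 * ((u64)GORCH << 32); the 82598 only has a counter in the high
 * register.  Every read is accumulated into the software copy of the
 * statistic.
 */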
1293 static void
1294 ixgbe_update_stats_counters(struct adapter *adapter)
1295 {
1296         struct ixgbe_hw       *hw = &adapter->hw;
1297         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1298         u32                   missed_rx = 0, bprc, lxon, lxoff, total;
1299         u64                   total_missed_rx = 0;
1300
1301         stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1302         stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1303         stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1304         stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1305         stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0));
1306
1307         for (int i = 0; i < 16; i++) {
1308                 stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1309                 stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1310                 stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1311         }
1312         stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
1313         stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
1314         stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1315
1316         /* Hardware workaround, gprc counts missed packets */
1317         stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
1318         stats->gprc -= missed_rx;
1319
1320         if (hw->mac.type != ixgbe_mac_82598EB) {
1321                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1322                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1323                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1324                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
1325                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
1326                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1327                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1328                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1329         } else {
1330                 stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1331                 stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1332                 /* 82598 only has a counter in the high register */
1333                 stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1334                 stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
1335                 stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
1336         }
1337
1338         /*
1339          * Workaround: mprc hardware is incorrectly counting
1340          * broadcasts, so for now we subtract those.
1341          */
1342         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1343         stats->bprc += bprc;
1344         stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1345         if (hw->mac.type == ixgbe_mac_82598EB)
1346                 stats->mprc -= bprc;
1347
1348         stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1349         stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1350         stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1351         stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1352         stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1353         stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1354
1355         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1356         stats->lxontxc += lxon;
1357         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1358         stats->lxofftxc += lxoff;
1359         total = lxon + lxoff;
1360
1361         stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
1362         stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
1363         stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
1364         stats->gptc -= total;
1365         stats->mptc -= total;
1366         stats->ptc64 -= total;
1367         stats->gotc -= total * ETHER_MIN_LEN;
1368
1369         stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
1370         stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
1371         stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1372         stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
1373         stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1374         stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1375         stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1376         stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
1377         stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
1378         stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
1379         stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
1380         stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
1381         stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1382         stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1383         stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
1384         stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
1385         stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1386         stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1387         /* Only read FCOE on 82599 */
1388         if (hw->mac.type != ixgbe_mac_82598EB) {
1389                 stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1390                 stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1391                 stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1392                 stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1393                 stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1394         }
1395
1396         /* Fill out the OS statistics structure */
1397         IXGBE_SET_IPACKETS(adapter, stats->gprc);
1398         IXGBE_SET_OPACKETS(adapter, stats->gptc);
1399         IXGBE_SET_IBYTES(adapter, stats->gorc);
1400         IXGBE_SET_OBYTES(adapter, stats->gotc);
1401         IXGBE_SET_IMCASTS(adapter, stats->mprc);
1402         IXGBE_SET_OMCASTS(adapter, stats->mptc);
1403         IXGBE_SET_COLLISIONS(adapter, 0);
1404         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
1405         IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->rlec);
1406 } /* ixgbe_update_stats_counters */
1407
1408 /************************************************************************
1409  * ixgbe_add_hw_stats
1410  *
1411  *   Add sysctl variables, one per statistic, to the system.
1412  ************************************************************************/
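/*
 * The counters land under the device's sysctl tree, so with the first
 * port attached as ix0 something like the following should work:
 *
 *   sysctl dev.ix.0.mac_stats.crc_errs
 *   sysctl dev.ix.0.queue0.interrupt_rate
 */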
1413 static void
1414 ixgbe_add_hw_stats(struct adapter *adapter)
1415 {
1416         device_t               dev = adapter->dev;
1417         struct tx_ring         *txr = adapter->tx_rings;
1418         struct rx_ring         *rxr = adapter->rx_rings;
1419         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1420         struct sysctl_oid      *tree = device_get_sysctl_tree(dev);
1421         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1422         struct ixgbe_hw_stats  *stats = &adapter->stats.pf;
1423         struct sysctl_oid      *stat_node, *queue_node;
1424         struct sysctl_oid_list *stat_list, *queue_list;
1425
1426 #define QUEUE_NAME_LEN 32
1427         char                   namebuf[QUEUE_NAME_LEN];
1428
1429         /* Driver Statistics */
1430         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1431             CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets");
1432         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1433             CTLFLAG_RD, &adapter->mbuf_defrag_failed, "m_defrag() failed");
1434         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1435             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1436         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1437             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1438
1439         for (int i = 0; i < adapter->num_queues; i++, txr++) {
1440                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1441                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1442                     CTLFLAG_RD, NULL, "Queue Name");
1443                 queue_list = SYSCTL_CHILDREN(queue_node);
1444
1445                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
1446                     CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
1447                     sizeof(&adapter->queues[i]),
1448                     ixgbe_sysctl_interrupt_rate_handler, "IU",
1449                     "Interrupt Rate");
1450                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1451                     CTLFLAG_RD, &(adapter->queues[i].irqs),
1452                     "irqs on this queue");
1453                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
1454                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1455                     ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
1456                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
1457                     CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
1458                     ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
1459                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1460                     CTLFLAG_RD, &txr->tso_tx, "TSO");
1461                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
1462                     CTLFLAG_RD, &txr->no_tx_dma_setup,
1463                     "Driver tx dma failure in xmit");
1464                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
1465                     CTLFLAG_RD, &txr->no_desc_avail,
1466                     "Queue No Descriptor Available");
1467                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1468                     CTLFLAG_RD, &txr->total_packets,
1469                     "Queue Packets Transmitted");
1470                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
1471                     CTLFLAG_RD, &txr->br->br_drops,
1472                     "Packets dropped in buf_ring");
1473         }
1474
1475         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1476                 struct lro_ctrl *lro = &rxr->lro;
1477
1478                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1479                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1480                     CTLFLAG_RD, NULL, "Queue Name");
1481                 queue_list = SYSCTL_CHILDREN(queue_node);
1482
1483                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
1484                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1485                     ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
1486                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
1487                     CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
1488                     ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
1489                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1490                     CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
1491                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1492                     CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received");
1493                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
1494                     CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames");
1495                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1496                     CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets");
1497                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_queued",
1498                     CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued");
1499                 SYSCTL_ADD_U64(ctx, queue_list, OID_AUTO, "lro_flushed",
1500                     CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed");
1501         }
1502
1503         /* MAC stats get their own sub node */
1504
1505         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
1506             CTLFLAG_RD, NULL, "MAC Statistics");
1507         stat_list = SYSCTL_CHILDREN(stat_node);
1508
1509         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
1510             CTLFLAG_RD, &stats->crcerrs, "CRC Errors");
1511         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
1512             CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors");
1513         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
1514             CTLFLAG_RD, &stats->errbc, "Byte Errors");
1515         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
1516             CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded");
1517         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
1518             CTLFLAG_RD, &stats->mlfc, "MAC Local Faults");
1519         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
1520             CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults");
1521         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
1522             CTLFLAG_RD, &stats->rlec, "Receive Length Errors");
1523         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets",
1524             CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count");
1525
1526         /* Flow Control stats */
1527         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
1528             CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted");
1529         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
1530             CTLFLAG_RD, &stats->lxonrxc, "Link XON Received");
1531         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
1532             CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted");
1533         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
1534             CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received");
1535
1536         /* Packet Reception Stats */
1537         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
1538             CTLFLAG_RD, &stats->tor, "Total Octets Received");
1539         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1540             CTLFLAG_RD, &stats->gorc, "Good Octets Received");
1541         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
1542             CTLFLAG_RD, &stats->tpr, "Total Packets Received");
1543         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1544             CTLFLAG_RD, &stats->gprc, "Good Packets Received");
1545         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1546             CTLFLAG_RD, &stats->mprc, "Multicast Packets Received");
1547         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
1548             CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received");
1549         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
1550             CTLFLAG_RD, &stats->prc64, "64 byte frames received ");
1551         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
1552             CTLFLAG_RD, &stats->prc127, "65-127 byte frames received");
1553         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
1554             CTLFLAG_RD, &stats->prc255, "128-255 byte frames received");
1555         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
1556             CTLFLAG_RD, &stats->prc511, "256-511 byte frames received");
1557         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
1558             CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received");
1559         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
1560             CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received");
1561         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
1562             CTLFLAG_RD, &stats->ruc, "Receive Undersized");
1563         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
1564             CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received ");
1565         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
1566             CTLFLAG_RD, &stats->roc, "Oversized Packets Received");
1567         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
1568             CTLFLAG_RD, &stats->rjc, "Received Jabber");
1569         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
1570             CTLFLAG_RD, &stats->mngprc, "Management Packets Received");
1571         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
1572             CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
1573         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
1574             CTLFLAG_RD, &stats->xec, "Checksum Errors");
1575
1576         /* Packet Transmission Stats */
1577         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1578             CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted");
1579         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
1580             CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted");
1581         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1582             CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted");
1583         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
1584             CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted");
1585         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
1586             CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted");
1587         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
1588             CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted");
1589         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
1590             CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted ");
1591         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
1592             CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted");
1593         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
1594             CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted");
1595         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
1596             CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted");
1597         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
1598             CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted");
1599         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
1600             CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted");
1601 } /* ixgbe_add_hw_stats */
1602
1603 /************************************************************************
1604  * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
1605  *
1606  *   Retrieves the TDH value from the hardware
1607  ************************************************************************/
1608 static int
1609 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
1610 {
1611         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1612         int            error;
1613         unsigned int   val;
1614
1615         if (!txr)
1616                 return (0);
1617
1618         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
1619         error = sysctl_handle_int(oidp, &val, 0, req);
1620         if (error || !req->newptr)
1621                 return error;
1622
1623         return (0);
1624 } /* ixgbe_sysctl_tdh_handler */
1625
1626 /************************************************************************
1627  * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1628  *
1629  *   Retrieves the TDT value from the hardware
1630  ************************************************************************/
1631 static int
1632 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
1633 {
1634         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
1635         int            error;
1636         unsigned int   val;
1637
1638         if (!txr)
1639                 return (0);
1640
1641         val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
1642         error = sysctl_handle_int(oidp, &val, 0, req);
1643         if (error || !req->newptr)
1644                 return error;
1645
1646         return (0);
1647 } /* ixgbe_sysctl_tdt_handler */
1648
1649 /************************************************************************
1650  * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
1651  *
1652  *   Retrieves the RDH value from the hardware
1653  ************************************************************************/
1654 static int
1655 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
1656 {
1657         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1658         int            error;
1659         unsigned int   val;
1660
1661         if (!rxr)
1662                 return (0);
1663
1664         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
1665         error = sysctl_handle_int(oidp, &val, 0, req);
1666         if (error || !req->newptr)
1667                 return error;
1668
1669         return (0);
1670 } /* ixgbe_sysctl_rdh_handler */
1671
1672 /************************************************************************
1673  * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
1674  *
1675  *   Retrieves the RDT value from the hardware
1676  ************************************************************************/
1677 static int
1678 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
1679 {
1680         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
1681         int            error;
1682         unsigned int   val;
1683
1684         if (!rxr)
1685                 return (0);
1686
1687         val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
1688         error = sysctl_handle_int(oidp, &val, 0, req);
1689         if (error || !req->newptr)
1690                 return error;
1691
1692         return (0);
1693 } /* ixgbe_sysctl_rdt_handler */
1694
1695 /************************************************************************
1696  * ixgbe_register_vlan
1697  *
1698  *   Run via the vlan config EVENT.  Since the event gives us the
1699  *   vlan id, we can use the HW filter table.  This only creates the
1700  *   entry in the soft version of the VFTA; init will repopulate the
1701  *   real table.
1702  ************************************************************************/
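/*
 * The shadow VFTA is a bitmap of 128 32-bit words, one bit per vlan id.
 * For example, vtag 100 maps to index = (100 >> 5) & 0x7F = 3 and
 * bit = 100 & 0x1F = 4, so shadow_vfta[3] |= 0x10.
 */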
1703 static void
1704 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1705 {
1706         struct adapter *adapter = ifp->if_softc;
1707         u16            index, bit;
1708
1709         if (ifp->if_softc != arg)   /* Not our event */
1710                 return;
1711
1712         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1713                 return;
1714
1715         IXGBE_CORE_LOCK(adapter);
1716         index = (vtag >> 5) & 0x7F;
1717         bit = vtag & 0x1F;
1718         adapter->shadow_vfta[index] |= (1 << bit);
1719         ++adapter->num_vlans;
1720         ixgbe_setup_vlan_hw_support(adapter);
1721         IXGBE_CORE_UNLOCK(adapter);
1722 } /* ixgbe_register_vlan */
1723
1724 /************************************************************************
1725  * ixgbe_unregister_vlan
1726  *
1727  *   Run via the vlan unconfig EVENT; removes our entry from the soft VFTA.
1728  ************************************************************************/
1729 static void
1730 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1731 {
1732         struct adapter *adapter = ifp->if_softc;
1733         u16            index, bit;
1734
1735         if (ifp->if_softc != arg)
1736                 return;
1737
1738         if ((vtag == 0) || (vtag > 4095))  /* Invalid */
1739                 return;
1740
1741         IXGBE_CORE_LOCK(adapter);
1742         index = (vtag >> 5) & 0x7F;
1743         bit = vtag & 0x1F;
1744         adapter->shadow_vfta[index] &= ~(1 << bit);
1745         --adapter->num_vlans;
1746         /* Re-init to load the changes */
1747         ixgbe_setup_vlan_hw_support(adapter);
1748         IXGBE_CORE_UNLOCK(adapter);
1749 } /* ixgbe_unregister_vlan */
1750
1751 /************************************************************************
1752  * ixgbe_setup_vlan_hw_support
1753  ************************************************************************/
1754 static void
1755 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
1756 {
1757         struct ifnet    *ifp = adapter->ifp;
1758         struct ixgbe_hw *hw = &adapter->hw;
1759         struct rx_ring  *rxr;
1760         int             i;
1761         u32             ctrl;
1762
1763
1764         /*
1765          * We get here through init_locked, meaning a soft
1766          * reset has already cleared the VFTA and other
1767          * state, so if no vlans have been registered there
1768          * is nothing to do.
1769          */
1770         if (adapter->num_vlans == 0)
1771                 return;
1772
1773         /* Setup the queues for vlans */
1774         for (i = 0; i < adapter->num_queues; i++) {
1775                 rxr = &adapter->rx_rings[i];
1776                 /* On 82599 the VLAN enable is per/queue in RXDCTL */
1777                 if (hw->mac.type != ixgbe_mac_82598EB) {
1778                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
1779                         ctrl |= IXGBE_RXDCTL_VME;
1780                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
1781                 }
1782                 rxr->vtag_strip = TRUE;
1783         }
1784
1785         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1786                 return;
1787         /*
1788          * A soft reset zeroes out the VFTA, so
1789          * we need to repopulate it now.
1790          */
1791         for (i = 0; i < IXGBE_VFTA_SIZE; i++)
1792                 if (adapter->shadow_vfta[i] != 0)
1793                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
1794                             adapter->shadow_vfta[i]);
1795
1796         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1797         /* Enable the filter table if VLAN hardware filtering is enabled */
1798         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
1799                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1800                 ctrl |= IXGBE_VLNCTRL_VFE;
1801         }
1802         if (hw->mac.type == ixgbe_mac_82598EB)
1803                 ctrl |= IXGBE_VLNCTRL_VME;
1804         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
1805 } /* ixgbe_setup_vlan_hw_support */
1806
1807 /************************************************************************
1808  * ixgbe_get_slot_info
1809  *
1810  *   Get the width and transaction speed of
1811  *   the slot this adapter is plugged into.
1812  ************************************************************************/
1813 static void
1814 ixgbe_get_slot_info(struct adapter *adapter)
1815 {
1816         device_t              dev = adapter->dev;
1817         struct ixgbe_hw       *hw = &adapter->hw;
1818         u32                   offset;
1819         u16                   link;
1820         int                   bus_info_valid = TRUE;
1821
1822         /* Some devices are behind an internal bridge */
1823         switch (hw->device_id) {
1824         case IXGBE_DEV_ID_82599_SFP_SF_QP:
1825         case IXGBE_DEV_ID_82599_QSFP_SF_QP:
1826                 goto get_parent_info;
1827         default:
1828                 break;
1829         }
1830
1831         ixgbe_get_bus_info(hw);
1832
1833         /*
1834          * Some devices don't use PCI-E; rather than display
1835          * "Unknown" for bus speed and width, skip the display.
1836          */
1837         switch (hw->mac.type) {
1838         case ixgbe_mac_X550EM_x:
1839         case ixgbe_mac_X550EM_a:
1840                 return;
1841         default:
1842                 goto display;
1843         }
1844
1845 get_parent_info:
1846         /*
1847          * For the Quad port adapter we need to parse back
1848          * up the PCI tree to find the speed of the expansion
1849          * slot into which this adapter is plugged. A bit more work.
1850          */
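        /*
         * Each device_get_parent() pair below steps from a device up to
         * the pci bus it sits on and then to that bus's pcib bridge, so
         * two pairs walk from the NIC function past the internal bridge
         * to the expansion slot's downstream port, whose link status
         * register reflects the real slot speed and width.
         */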
1851         dev = device_get_parent(device_get_parent(dev));
1852 #ifdef IXGBE_DEBUG
1853         device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
1854             pci_get_slot(dev), pci_get_function(dev));
1855 #endif
1856         dev = device_get_parent(device_get_parent(dev));
1857 #ifdef IXGBE_DEBUG
1858         device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
1859             pci_get_slot(dev), pci_get_function(dev));
1860 #endif
1861         /* Now get the PCI Express Capabilities offset */
1862         if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) {
1863                 /*
1864                  * Hmm...can't get PCI-Express capabilities.
1865                  * Falling back to default method.
1866                  */
1867                 bus_info_valid = FALSE;
1868                 ixgbe_get_bus_info(hw);
1869                 goto display;
1870         }
1871         /* ...and read the Link Status Register */
1872         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
1873         ixgbe_set_pci_config_data_generic(hw, link);
1874
1875 display:
1876         device_printf(dev, "PCI Express Bus: Speed %s %s\n",
1877             ((hw->bus.speed == ixgbe_bus_speed_8000)    ? "8.0GT/s"  :
1878              (hw->bus.speed == ixgbe_bus_speed_5000)    ? "5.0GT/s"  :
1879              (hw->bus.speed == ixgbe_bus_speed_2500)    ? "2.5GT/s"  :
1880              "Unknown"),
1881             ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
1882              (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
1883              (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
1884              "Unknown"));
1885
1886         if (bus_info_valid) {
1887                 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1888                     ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
1889                     (hw->bus.speed == ixgbe_bus_speed_2500))) {
1890                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1891                         device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
1892                 }
1893                 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
1894                     ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
1895                     (hw->bus.speed < ixgbe_bus_speed_8000))) {
1896                         device_printf(dev, "PCI-Express bandwidth available for this card\n     is not sufficient for optimal performance.\n");
1897                         device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
1898                 }
1899         } else
1900                 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
1901
1902         return;
1903 } /* ixgbe_get_slot_info */
1904
1905 /************************************************************************
1906  * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
1907  ************************************************************************/
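/*
 * On 82598 the queue bits live in the single EIMS register.  Later MACs
 * split the 64-bit queue mask across EIMS_EX(0)/EIMS_EX(1): for example,
 * vector 35 gives queue = (u64)1 << 35, so the low 32-bit mask is zero
 * and EIMS_EX(1) is written with (queue >> 32) = 0x8.
 * ixgbe_disable_queue() does the same with EIMC/EIMC_EX.
 */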
1908 static inline void
1909 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1910 {
1911         struct ixgbe_hw *hw = &adapter->hw;
1912         u64             queue = (u64)1 << vector;
1913         u32             mask;
1914
1915         if (hw->mac.type == ixgbe_mac_82598EB) {
1916                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1917                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1918         } else {
1919                 mask = (queue & 0xFFFFFFFF);
1920                 if (mask)
1921                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1922                 mask = (queue >> 32);
1923                 if (mask)
1924                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1925         }
1926 } /* ixgbe_enable_queue */
1927
1928 /************************************************************************
1929  * ixgbe_disable_queue
1930  ************************************************************************/
1931 static inline void
1932 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1933 {
1934         struct ixgbe_hw *hw = &adapter->hw;
1935         u64             queue = (u64)1 << vector;
1936         u32             mask;
1937
1938         if (hw->mac.type == ixgbe_mac_82598EB) {
1939                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1940                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1941         } else {
1942                 mask = (queue & 0xFFFFFFFF);
1943                 if (mask)
1944                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1945                 mask = (queue >> 32);
1946                 if (mask)
1947                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1948         }
1949 } /* ixgbe_disable_queue */
1950
1951 /************************************************************************
1952  * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
1953  ************************************************************************/
1954 void
1955 ixgbe_msix_que(void *arg)
1956 {
1957         struct ix_queue *que = arg;
1958         struct adapter  *adapter = que->adapter;
1959         struct ifnet    *ifp = adapter->ifp;
1960         struct tx_ring  *txr = que->txr;
1961         struct rx_ring  *rxr = que->rxr;
1962         bool            more;
1963         u32             newitr = 0;
1964
1965
1966         /* Protect against spurious interrupts */
1967         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1968                 return;
1969
1970         ixgbe_disable_queue(adapter, que->msix);
1971         ++que->irqs;
1972
1973         more = ixgbe_rxeof(que);
1974
1975         IXGBE_TX_LOCK(txr);
1976         ixgbe_txeof(txr);
1977         if (!ixgbe_ring_empty(ifp, txr->br))
1978                 ixgbe_start_locked(ifp, txr);
1979         IXGBE_TX_UNLOCK(txr);
1980
1981         /* Do AIM now? */
1982
1983         if (adapter->enable_aim == FALSE)
1984                 goto no_calc;
1985         /*
1986          * Do Adaptive Interrupt Moderation:
1987          *  - Write out last calculated setting
1988          *  - Calculate based on average size over
1989          *    the last interval.
1990          */
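        /*
         * Rough example of the calculation below: an average frame of
         * ~600 bytes yields 600 + 24 = 624, which falls in the 300-1200
         * mid range and is divided by 3 to 208 before being saved as the
         * next EITR setting; a 1500-byte average gives
         * (1500 + 24) / 2 = 762 instead.
         */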
1991         if (que->eitr_setting)
1992                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
1993                     que->eitr_setting);
1994
1995         que->eitr_setting = 0;
1996
1997         /* Idle, do nothing */
1998         if ((txr->bytes == 0) && (rxr->bytes == 0))
1999                 goto no_calc;
2000
2001         if ((txr->bytes) && (txr->packets))
2002                 newitr = txr->bytes/txr->packets;
2003         if ((rxr->bytes) && (rxr->packets))
2004                 newitr = max(newitr, (rxr->bytes / rxr->packets));
2005         newitr += 24; /* account for hardware frame, crc */
2006
2007         /* set an upper boundary */
2008         newitr = min(newitr, 3000);
2009
2010         /* Be nice to the mid range */
2011         if ((newitr > 300) && (newitr < 1200))
2012                 newitr = (newitr / 3);
2013         else
2014                 newitr = (newitr / 2);
2015
2016         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2017                 newitr |= newitr << 16;
2018         else
2019                 newitr |= IXGBE_EITR_CNT_WDIS;
2020
2021         /* save for next interrupt */
2022         que->eitr_setting = newitr;
2023
2024         /* Reset state */
2025         txr->bytes = 0;
2026         txr->packets = 0;
2027         rxr->bytes = 0;
2028         rxr->packets = 0;
2029
2030 no_calc:
2031         if (more)
2032                 taskqueue_enqueue(que->tq, &que->que_task);
2033         else
2034                 ixgbe_enable_queue(adapter, que->msix);
2035
2036         return;
2037 } /* ixgbe_msix_que */
2038
2039 /************************************************************************
2040  * ixgbe_media_status - Media Ioctl callback
2041  *
2042  *   Called whenever the user queries the status of
2043  *   the interface using ifconfig.
2044  ************************************************************************/
2045 static void
2046 ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2047 {
2048         struct adapter  *adapter = ifp->if_softc;
2049         struct ixgbe_hw *hw = &adapter->hw;
2050         int             layer;
2051
2052         INIT_DEBUGOUT("ixgbe_media_status: begin");
2053         IXGBE_CORE_LOCK(adapter);
2054         ixgbe_update_link_status(adapter);
2055
2056         ifmr->ifm_status = IFM_AVALID;
2057         ifmr->ifm_active = IFM_ETHER;
2058
2059         if (!adapter->link_active) {
2060                 IXGBE_CORE_UNLOCK(adapter);
2061                 return;
2062         }
2063
2064         ifmr->ifm_status |= IFM_ACTIVE;
2065         layer = adapter->phy_layer;
2066
2067         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2068             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2069             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2070             layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2071                 switch (adapter->link_speed) {
2072                 case IXGBE_LINK_SPEED_10GB_FULL:
2073                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2074                         break;
2075                 case IXGBE_LINK_SPEED_1GB_FULL:
2076                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2077                         break;
2078                 case IXGBE_LINK_SPEED_100_FULL:
2079                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2080                         break;
2081                 case IXGBE_LINK_SPEED_10_FULL:
2082                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2083                         break;
2084                 }
2085         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2086             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2087                 switch (adapter->link_speed) {
2088                 case IXGBE_LINK_SPEED_10GB_FULL:
2089                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2090                         break;
2091                 }
2092         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2093                 switch (adapter->link_speed) {
2094                 case IXGBE_LINK_SPEED_10GB_FULL:
2095                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2096                         break;
2097                 case IXGBE_LINK_SPEED_1GB_FULL:
2098                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2099                         break;
2100                 }
2101         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2102                 switch (adapter->link_speed) {
2103                 case IXGBE_LINK_SPEED_10GB_FULL:
2104                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2105                         break;
2106                 case IXGBE_LINK_SPEED_1GB_FULL:
2107                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2108                         break;
2109                 }
2110         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2111             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2112                 switch (adapter->link_speed) {
2113                 case IXGBE_LINK_SPEED_10GB_FULL:
2114                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2115                         break;
2116                 case IXGBE_LINK_SPEED_1GB_FULL:
2117                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2118                         break;
2119                 }
2120         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2121                 switch (adapter->link_speed) {
2122                 case IXGBE_LINK_SPEED_10GB_FULL:
2123                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2124                         break;
2125                 }
2126         /*
2127          * XXX: These need to use the proper media types once
2128          * they're added.
2129          */
2130 #ifndef IFM_ETH_XTYPE
2131         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2132                 switch (adapter->link_speed) {
2133                 case IXGBE_LINK_SPEED_10GB_FULL:
2134                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2135                         break;
2136                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2137                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2138                         break;
2139                 case IXGBE_LINK_SPEED_1GB_FULL:
2140                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2141                         break;
2142                 }
2143         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2144             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2145             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2146                 switch (adapter->link_speed) {
2147                 case IXGBE_LINK_SPEED_10GB_FULL:
2148                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2149                         break;
2150                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2151                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
2152                         break;
2153                 case IXGBE_LINK_SPEED_1GB_FULL:
2154                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
2155                         break;
2156                 }
2157 #else
2158         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2159                 switch (adapter->link_speed) {
2160                 case IXGBE_LINK_SPEED_10GB_FULL:
2161                         ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2162                         break;
2163                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2164                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2165                         break;
2166                 case IXGBE_LINK_SPEED_1GB_FULL:
2167                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2168                         break;
2169                 }
2170         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2171             layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2172             layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2173                 switch (adapter->link_speed) {
2174                 case IXGBE_LINK_SPEED_10GB_FULL:
2175                         ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2176                         break;
2177                 case IXGBE_LINK_SPEED_2_5GB_FULL:
2178                         ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2179                         break;
2180                 case IXGBE_LINK_SPEED_1GB_FULL:
2181                         ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2182                         break;
2183                 }
2184 #endif
2185
2186         /* If nothing is recognized... */
2187         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2188                 ifmr->ifm_active |= IFM_UNKNOWN;
2189
2190 #if __FreeBSD_version >= 900025
2191         /* Display current flow control setting used on link */
2192         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2193             hw->fc.current_mode == ixgbe_fc_full)
2194                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2195         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2196             hw->fc.current_mode == ixgbe_fc_full)
2197                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2198 #endif
2199
2200         IXGBE_CORE_UNLOCK(adapter);
2201
2202         return;
2203 } /* ixgbe_media_status */
2204
2205 /************************************************************************
2206  * ixgbe_media_change - Media Ioctl callback
2207  *
2208  *   Called when the user changes speed/duplex using
2209  *   media/mediaopt option with ifconfig.
2210  ************************************************************************/
2211 static int
2212 ixgbe_media_change(struct ifnet *ifp)
2213 {
2214         struct adapter   *adapter = ifp->if_softc;
2215         struct ifmedia   *ifm = &adapter->media;
2216         struct ixgbe_hw  *hw = &adapter->hw;
2217         ixgbe_link_speed speed = 0;
2218
2219         INIT_DEBUGOUT("ixgbe_media_change: begin");
2220
2221         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2222                 return (EINVAL);
2223
2224         if (hw->phy.media_type == ixgbe_media_type_backplane)
2225                 return (ENODEV);
2226
2227         /*
2228          * We don't actually need to check against the supported
2229          * media types of the adapter; ifmedia will take care of
2230          * that for us.
2231          */
2232         switch (IFM_SUBTYPE(ifm->ifm_media)) {
2233                 case IFM_AUTO:
2234                 case IFM_10G_T:
2235                         speed |= IXGBE_LINK_SPEED_100_FULL;
2236                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2237                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2238                         break;
2239                 case IFM_10G_LRM:
2240                 case IFM_10G_LR:
2241 #ifndef IFM_ETH_XTYPE
2242                 case IFM_10G_SR: /* KR, too */
2243                 case IFM_10G_CX4: /* KX4 */
2244 #else
2245                 case IFM_10G_KR:
2246                 case IFM_10G_KX4:
2247 #endif
2248                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2249                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2250                         break;
2251 #ifndef IFM_ETH_XTYPE
2252                 case IFM_1000_CX: /* KX */
2253 #else
2254                 case IFM_1000_KX:
2255 #endif
2256                 case IFM_1000_LX:
2257                 case IFM_1000_SX:
2258                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2259                         break;
2260                 case IFM_1000_T:
2261                         speed |= IXGBE_LINK_SPEED_100_FULL;
2262                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
2263                         break;
2264                 case IFM_10G_TWINAX:
2265                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
2266                         break;
2267                 case IFM_100_TX:
2268                         speed |= IXGBE_LINK_SPEED_100_FULL;
2269                         break;
2270                 case IFM_10_T:
2271                         speed |= IXGBE_LINK_SPEED_10_FULL;
2272                         break;
2273                 default:
2274                         goto invalid;
2275         }
2276
2277         hw->mac.autotry_restart = TRUE;
2278         hw->mac.ops.setup_link(hw, speed, TRUE);
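        /*
         * adapter->advertise encodes the requested speeds as a bit mask:
         * 0x1 = 100M, 0x2 = 1G, 0x4 = 10G, 0x8 = 10M.  Selecting IFM_AUTO
         * on a 10GBASE-T port, for example, sets 100M/1G/10G above and
         * therefore stores 0x7 here.
         */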
2279         adapter->advertise =
2280             ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
2281             ((speed & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
2282             ((speed & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
2283             ((speed & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
2284
2285         return (0);
2286
2287 invalid:
2288         device_printf(adapter->dev, "Invalid media type!\n");
2289
2290         return (EINVAL);
2291 } /* ixgbe_media_change */
2292
2293 /************************************************************************
2294  * ixgbe_set_promisc
2295  ************************************************************************/
2296 static void
2297 ixgbe_set_promisc(struct adapter *adapter)
2298 {
2299         struct ifnet *ifp = adapter->ifp;
2300         int          mcnt = 0;
2301         u32          rctl;
2302
2303         rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2304         rctl &= (~IXGBE_FCTRL_UPE);
2305         if (ifp->if_flags & IFF_ALLMULTI)
2306                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
2307         else {
2308                 struct ifmultiaddr *ifma;
2309 #if __FreeBSD_version < 800000
2310                 IF_ADDR_LOCK(ifp);
2311 #else
2312                 if_maddr_rlock(ifp);
2313 #endif
2314                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2315                         if (ifma->ifma_addr->sa_family != AF_LINK)
2316                                 continue;
2317                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2318                                 break;
2319                         mcnt++;
2320                 }
2321 #if __FreeBSD_version < 800000
2322                 IF_ADDR_UNLOCK(ifp);
2323 #else
2324                 if_maddr_runlock(ifp);
2325 #endif
2326         }
2327         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
2328                 rctl &= (~IXGBE_FCTRL_MPE);
2329         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2330
2331         if (ifp->if_flags & IFF_PROMISC) {
2332                 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2333                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2334         } else if (ifp->if_flags & IFF_ALLMULTI) {
2335                 rctl |= IXGBE_FCTRL_MPE;
2336                 rctl &= ~IXGBE_FCTRL_UPE;
2337                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
2338         }
2339 } /* ixgbe_set_promisc */
2340
2341 /************************************************************************
2342  * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
2343  ************************************************************************/
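/*
 * Reads and acknowledges the interrupt cause, then defers the actual
 * work (link change, SFP module/MSF, Flow Director reinit, VF mailbox,
 * external PHY) to the admin taskqueue by setting bits in
 * adapter->task_requests.
 */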
2344 static void
2345 ixgbe_msix_link(void *arg)
2346 {
2347         struct adapter  *adapter = arg;
2348         struct ixgbe_hw *hw = &adapter->hw;
2349         u32             eicr, eicr_mask;
2350         s32             retval;
2351
2352         ++adapter->link_irq;
2353
2354         /* Pause other interrupts */
2355         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
2356
2357         /* First get the cause */
2358         eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2359         /* Be sure the queue bits are not cleared */
2360         eicr &= ~IXGBE_EICR_RTX_QUEUE;
2361         /* Clear interrupt with write */
2362         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
2363
2364         /* Link status change */
2365         if (eicr & IXGBE_EICR_LSC) {
2366                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
2367                 adapter->task_requests |= IXGBE_REQUEST_TASK_LINK;
2368                 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2369         }
2370
2371         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2372                 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
2373                     (eicr & IXGBE_EICR_FLOW_DIR)) {
2374                         /* This is probably overkill :) */
2375                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
2376                                 return;
2377                         /* Disable the interrupt */
2378                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
2379                         adapter->task_requests |= IXGBE_REQUEST_TASK_FDIR;
2380                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2381                 }
2382
2383                 if (eicr & IXGBE_EICR_ECC) {
2384                         device_printf(adapter->dev,
2385                             "CRITICAL: ECC ERROR!!  Please Reboot!!\n");
2386                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
2387                 }
2388
2389                 /* Check for over temp condition */
2390                 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
2391                         switch (adapter->hw.mac.type) {
2392                         case ixgbe_mac_X550EM_a:
2393                                 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
2394                                         break;
2395                                 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
2396                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2397                                 IXGBE_WRITE_REG(hw, IXGBE_EICR,
2398                                     IXGBE_EICR_GPI_SDP0_X550EM_a);
2399                                 retval = hw->phy.ops.check_overtemp(hw);
2400                                 if (retval != IXGBE_ERR_OVERTEMP)
2401                                         break;
2402                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2403                                 device_printf(adapter->dev, "System shutdown required!\n");
2404                                 break;
2405                         default:
2406                                 if (!(eicr & IXGBE_EICR_TS))
2407                                         break;
2408                                 retval = hw->phy.ops.check_overtemp(hw);
2409                                 if (retval != IXGBE_ERR_OVERTEMP)
2410                                         break;
2411                                 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
2412                                 device_printf(adapter->dev, "System shutdown required!\n");
2413                                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
2414                                 break;
2415                         }
2416                 }
2417
2418                 /* Check for VF message */
2419                 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
2420                     (eicr & IXGBE_EICR_MAILBOX)) {
2421                         adapter->task_requests |= IXGBE_REQUEST_TASK_MBX;
2422                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2423                 }
2424         }
2425
2426         if (ixgbe_is_sfp(hw)) {
2427                 /* Pluggable optics-related interrupt */
2428                 if (hw->mac.type >= ixgbe_mac_X540)
2429                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
2430                 else
2431                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
2432
2433                 if (eicr & eicr_mask) {
2434                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
2435                         adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
2436                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2437                 }
2438
2439                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2440                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
2441                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
2442                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2443                         adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
2444                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2445                 }
2446         }
2447
2448         /* Check for fan failure */
2449         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
2450                 ixgbe_check_fan_failure(adapter, eicr, TRUE);
2451                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
2452         }
2453
2454         /* External PHY interrupt */
2455         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
2456             (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
2457                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
2458                 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
2459                 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
2460         }
2461 } /* ixgbe_msix_link */
2462
2463 /************************************************************************
2464  * ixgbe_sysctl_interrupt_rate_handler
2465  ************************************************************************/
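/*
 * The interrupt rate is derived from the EITR interval field in bits
 * 3..11: rate = 500000 / ((reg & 0x0FF8) >> 3).  On a write, the
 * ((4000000 / rate) & 0xff8) expression folds the <<3 field shift into
 * the constant; e.g. requesting 8000 interrupts/sec stores
 * (4000000 / 8000) & 0xff8 = 0x1f0 in the field.
 */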
2466 static int
2467 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
2468 {
2469         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
2470         int             error;
2471         unsigned int    reg, usec, rate;
2472
2473         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
2474         usec = ((reg & 0x0FF8) >> 3);
2475         if (usec > 0)
2476                 rate = 500000 / usec;
2477         else
2478                 rate = 0;
2479         error = sysctl_handle_int(oidp, &rate, 0, req);
2480         if (error || !req->newptr)
2481                 return error;
2482         reg &= ~0xfff; /* default, no limitation */
2483         ixgbe_max_interrupt_rate = 0;
2484         if (rate > 0 && rate < 500000) {
2485                 if (rate < 1000)
2486                         rate = 1000;
2487                 ixgbe_max_interrupt_rate = rate;
2488                 reg |= ((4000000/rate) & 0xff8);
2489         }
2490         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
2491
2492         return (0);
2493 } /* ixgbe_sysctl_interrupt_rate_handler */
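
/*
 * Worked example of the EITR conversion above (illustrative only; the
 * numbers follow directly from the arithmetic in this handler, which
 * treats the interval field in bits 11:3 of EITR as 2 usec units).
 * Requesting a rate of 8000 interrupts/s:
 *
 *     reg  = (4000000 / 8000) & 0xff8;   becomes 0x1f0 (496)
 *     usec = (reg & 0x0FF8) >> 3;        becomes 62, i.e. ~124 usec
 *     rate = 500000 / usec;              reported back as ~8064 ints/s
 *
 * The small difference between the requested and reported rate is the
 * rounding introduced by the & 0xff8 mask (one 2 usec step of granularity).
 */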
2494
2495 /************************************************************************
2496  * ixgbe_add_device_sysctls
2497  ************************************************************************/
2498 static void
2499 ixgbe_add_device_sysctls(struct adapter *adapter)
2500 {
2501         device_t               dev = adapter->dev;
2502         struct ixgbe_hw        *hw = &adapter->hw;
2503         struct sysctl_oid_list *child;
2504         struct sysctl_ctx_list *ctx;
2505
2506         ctx = device_get_sysctl_ctx(dev);
2507         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2508
2509         /* Sysctls for all devices */
2510         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
2511             adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
2512
2513         adapter->enable_aim = ixgbe_enable_aim;
2514         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim", CTLFLAG_RW,
2515             &adapter->enable_aim, 1, "Interrupt Moderation");
2516
2517         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
2518             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_advertise, "I",
2519             IXGBE_SYSCTL_DESC_ADV_SPEED);
2520
2521 #ifdef IXGBE_DEBUG
2522         /* testing sysctls (for all devices) */
2523         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "power_state",
2524             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_power_state,
2525             "I", "PCI Power State");
2526
2527         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "print_rss_config",
2528             CTLTYPE_STRING | CTLFLAG_RD, adapter, 0,
2529             ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
2530 #endif
2531         /* for X550 series devices */
2532         if (hw->mac.type >= ixgbe_mac_X550)
2533                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
2534                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_dmac,
2535                     "I", "DMA Coalesce");
2536
2537         /* for WoL-capable devices */
2538         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2539                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
2540                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2541                     ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
2542
2543                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
2544                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_sysctl_wufc,
2545                     "I", "Enable/Disable Wake Up Filters");
2546         }
2547
2548         /* for X552/X557-AT devices */
2549         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
2550                 struct sysctl_oid *phy_node;
2551                 struct sysctl_oid_list *phy_list;
2552
2553                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
2554                     CTLFLAG_RD, NULL, "External PHY sysctls");
2555                 phy_list = SYSCTL_CHILDREN(phy_node);
2556
2557                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
2558                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0, ixgbe_sysctl_phy_temp,
2559                     "I", "Current External PHY Temperature (Celsius)");
2560
2561                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
2562                     CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
2563                     ixgbe_sysctl_phy_overtemp_occurred, "I",
2564                     "External PHY High Temperature Event Occurred");
2565         }
2566
2567         if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
2568                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "eee_state",
2569                     CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
2570                     ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
2571         }
2572 } /* ixgbe_add_device_sysctls */
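
/*
 * Illustrative userland sketch (not part of the driver): the nodes
 * registered above live under the device's sysctl tree and can be read or
 * written with sysctl(3).  The "dev.ix.0" prefix assumes the usual unit
 * name of the first adapter and is only an example.
 *
 *     #include <sys/types.h>
 *     #include <sys/sysctl.h>
 *     #include <stdio.h>
 *
 *     int
 *     main(void)
 *     {
 *             int fc;
 *             size_t len = sizeof(fc);
 *
 *             // Read the current flow control mode ("fc" node above).
 *             if (sysctlbyname("dev.ix.0.fc", &fc, &len, NULL, 0) == 0)
 *                     printf("flow control mode: %d\n", fc);
 *             return (0);
 *     }
 */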
2573
2574 /************************************************************************
2575  * ixgbe_allocate_pci_resources
2576  ************************************************************************/
2577 static int
2578 ixgbe_allocate_pci_resources(struct adapter *adapter)
2579 {
2580         device_t dev = adapter->dev;
2581         int      rid;
2582
2583         rid = PCIR_BAR(0);
2584         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2585             RF_ACTIVE);
2586
2587         if (!(adapter->pci_mem)) {
2588                 device_printf(dev, "Unable to allocate bus resource: memory\n");
2589                 return (ENXIO);
2590         }
2591
2592         /* Save bus_space values for READ/WRITE_REG macros */
2593         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
2594         adapter->osdep.mem_bus_space_handle =
2595             rman_get_bushandle(adapter->pci_mem);
2596         /* Set hw values for shared code */
2597         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2598
2599         return (0);
2600 } /* ixgbe_allocate_pci_resources */
2601
2602 /************************************************************************
2603  * ixgbe_detach - Device removal routine
2604  *
2605  *   Called when the driver is being removed.
2606  *   Stops the adapter and deallocates all the resources
2607  *   that were allocated for driver operation.
2608  *
2609  *   return 0 on success, positive on failure
2610  ************************************************************************/
2611 static int
2612 ixgbe_detach(device_t dev)
2613 {
2614         struct adapter  *adapter = device_get_softc(dev);
2615         struct ix_queue *que = adapter->queues;
2616         struct tx_ring  *txr = adapter->tx_rings;
2617         u32             ctrl_ext;
2618
2619         INIT_DEBUGOUT("ixgbe_detach: begin");
2620
2621         /* Make sure VLANS are not using driver */
2622         if (adapter->ifp->if_vlantrunk != NULL) {
2623                 device_printf(dev, "VLAN in use; detach first.\n");
2624                 return (EBUSY);
2625         }
2626
2627         if (ixgbe_pci_iov_detach(dev) != 0) {
2628                 device_printf(dev, "SR-IOV in use; detach first.\n");
2629                 return (EBUSY);
2630         }
2631
2632         ether_ifdetach(adapter->ifp);
2633         /* Stop the adapter */
2634         IXGBE_CORE_LOCK(adapter);
2635         ixgbe_setup_low_power_mode(adapter);
2636         IXGBE_CORE_UNLOCK(adapter);
2637
2638         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2639                 if (que->tq) {
2640                         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2641                                 taskqueue_drain(que->tq, &txr->txq_task);
2642                         taskqueue_drain(que->tq, &que->que_task);
2643                         taskqueue_free(que->tq);
2644                 }
2645         }
2646
2647         /* let hardware know driver is unloading */
2648         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
2649         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
2650         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
2651
2652         /* Unregister VLAN events */
2653         if (adapter->vlan_attach != NULL)
2654                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
2655         if (adapter->vlan_detach != NULL)
2656                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
2657
2658         callout_drain(&adapter->timer);
2659
2660         if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
2661                 netmap_detach(adapter->ifp);
2662
2663         /* Drain the Admin Task queue */
2664         if (adapter->tq) {
2665                 taskqueue_drain(adapter->tq, &adapter->admin_task);
2666                 taskqueue_free(adapter->tq);
2667         }
2668
2669         ixgbe_free_pci_resources(adapter);
2670         bus_generic_detach(dev);
2671         if_free(adapter->ifp);
2672
2673         ixgbe_free_transmit_structures(adapter);
2674         ixgbe_free_receive_structures(adapter);
2675         free(adapter->queues, M_DEVBUF);
2676         free(adapter->mta, M_IXGBE);
2677
2678         IXGBE_CORE_LOCK_DESTROY(adapter);
2679
2680         return (0);
2681 } /* ixgbe_detach */
2682
2683 /************************************************************************
2684  * ixgbe_setup_low_power_mode - LPLU/WoL preparation
2685  *
2686  *   Prepare the adapter/port for LPLU and/or WoL
2687  ************************************************************************/
2688 static int
2689 ixgbe_setup_low_power_mode(struct adapter *adapter)
2690 {
2691         struct ixgbe_hw *hw = &adapter->hw;
2692         device_t        dev = adapter->dev;
2693         s32             error = 0;
2694
2695         mtx_assert(&adapter->core_mtx, MA_OWNED);
2696
2697         /* Limit power management flow to X550EM baseT */
2698         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
2699             hw->phy.ops.enter_lplu) {
2700                 /* Turn off support for APM wakeup. (Using ACPI instead) */
2701                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
2702                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
2703
2704                 /*
2705                  * Clear Wake Up Status register to prevent any previous wakeup
2706                  * events from waking us up immediately after we suspend.
2707                  */
2708                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2709
2710                 /*
2711                  * Program the Wakeup Filter Control register with user filter
2712                  * settings
2713                  */
2714                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
2715
2716                 /* Enable wakeups and power management in Wakeup Control */
2717                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
2718                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
2719
2720                 /* X550EM baseT adapters need a special LPLU flow */
2721                 hw->phy.reset_disable = true;
2722                 ixgbe_stop(adapter);
2723                 error = hw->phy.ops.enter_lplu(hw);
2724                 if (error)
2725                         device_printf(dev, "Error entering LPLU: %d\n", error);
2726                 hw->phy.reset_disable = false;
2727         } else {
2728                 /* Just stop for other adapters */
2729                 ixgbe_stop(adapter);
2730         }
2731
2732         return error;
2733 } /* ixgbe_setup_low_power_mode */
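
/*
 * Note (illustrative; assumes the standard WUFC bit definitions from the
 * shared code headers): adapter->wufc selects which wake events are armed
 * before entering LPLU, so arming magic-packet wake would look roughly like
 *
 *     adapter->wufc |= IXGBE_WUFC_MAG;
 *
 * before this routine writes the value into IXGBE_WUFC.  The "wufc" sysctl
 * added in ixgbe_add_device_sysctls() is the usual way to set it.
 */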
2734
2735 /************************************************************************
2736  * ixgbe_shutdown - Shutdown entry point
2737  ************************************************************************/
2738 static int
2739 ixgbe_shutdown(device_t dev)
2740 {
2741         struct adapter *adapter = device_get_softc(dev);
2742         int            error = 0;
2743
2744         INIT_DEBUGOUT("ixgbe_shutdown: begin");
2745
2746         IXGBE_CORE_LOCK(adapter);
2747         error = ixgbe_setup_low_power_mode(adapter);
2748         IXGBE_CORE_UNLOCK(adapter);
2749
2750         return (error);
2751 } /* ixgbe_shutdown */
2752
2753 /************************************************************************
2754  * ixgbe_suspend
2755  *
2756  *   From D0 to D3
2757  ************************************************************************/
2758 static int
2759 ixgbe_suspend(device_t dev)
2760 {
2761         struct adapter *adapter = device_get_softc(dev);
2762         int            error = 0;
2763
2764         INIT_DEBUGOUT("ixgbe_suspend: begin");
2765
2766         IXGBE_CORE_LOCK(adapter);
2767
2768         error = ixgbe_setup_low_power_mode(adapter);
2769
2770         IXGBE_CORE_UNLOCK(adapter);
2771
2772         return (error);
2773 } /* ixgbe_suspend */
2774
2775 /************************************************************************
2776  * ixgbe_resume
2777  *
2778  *   From D3 to D0
2779  ************************************************************************/
2780 static int
2781 ixgbe_resume(device_t dev)
2782 {
2783         struct adapter  *adapter = device_get_softc(dev);
2784         struct ifnet    *ifp = adapter->ifp;
2785         struct ixgbe_hw *hw = &adapter->hw;
2786         u32             wus;
2787
2788         INIT_DEBUGOUT("ixgbe_resume: begin");
2789
2790         IXGBE_CORE_LOCK(adapter);
2791
2792         /* Read & clear WUS register */
2793         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
2794         if (wus)
2795                 device_printf(dev, "Woken up by (WUS): %#010x\n",
2796                     wus);
2797         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
2798         /* And clear WUFC until next low-power transition */
2799         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
2800
2801         /*
2802          * Required after D3->D0 transition;
2803          * will re-advertise all previous advertised speeds
2804          */
2805         if (ifp->if_flags & IFF_UP)
2806                 ixgbe_init_locked(adapter);
2807
2808         IXGBE_CORE_UNLOCK(adapter);
2809
2810         return (0);
2811 } /* ixgbe_resume */
2812
2813 /************************************************************************
2814  * ixgbe_set_if_hwassist - Set the various hardware offload abilities.
2815  *
2816  *   Takes the ifnet's if_capenable flags (e.g. set by the user using
2817  *   ifconfig) and indicates to the OS via the ifnet's if_hwassist
2818  *   field what mbuf offload flags the driver will understand.
2819  ************************************************************************/
2820 static void
2821 ixgbe_set_if_hwassist(struct adapter *adapter)
2822 {
2823         struct ifnet *ifp = adapter->ifp;
2824
2825         ifp->if_hwassist = 0;
2826 #if __FreeBSD_version >= 1000000
2827         if (ifp->if_capenable & IFCAP_TSO4)
2828                 ifp->if_hwassist |= CSUM_IP_TSO;
2829         if (ifp->if_capenable & IFCAP_TSO6)
2830                 ifp->if_hwassist |= CSUM_IP6_TSO;
2831         if (ifp->if_capenable & IFCAP_TXCSUM) {
2832                 ifp->if_hwassist |= (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP);
2833                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2834                         ifp->if_hwassist |= CSUM_IP_SCTP;
2835         }
2836         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) {
2837                 ifp->if_hwassist |= (CSUM_IP6_UDP | CSUM_IP6_TCP);
2838                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2839                         ifp->if_hwassist |= CSUM_IP6_SCTP;
2840         }
2841 #else
2842         if (ifp->if_capenable & IFCAP_TSO)
2843                 ifp->if_hwassist |= CSUM_TSO;
2844         if (ifp->if_capenable & IFCAP_TXCSUM) {
2845                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
2846                 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2847                         ifp->if_hwassist |= CSUM_SCTP;
2848         }
2849 #endif
2850 } /* ixgbe_set_if_hwassist */
2851
2852 /************************************************************************
2853  * ixgbe_init_locked - Init entry point
2854  *
2855  *   Used in two ways: It is used by the stack as an init
2856  *   entry point in network interface structure. It is also
2857  *   used by the driver as a hw/sw initialization routine to
2858  *   get to a consistent state.
2859  *
2860  *   return 0 on success, positive on failure
2861  ************************************************************************/
2862 void
2863 ixgbe_init_locked(struct adapter *adapter)
2864 {
2865         struct ifnet    *ifp = adapter->ifp;
2866         device_t        dev = adapter->dev;
2867         struct ixgbe_hw *hw = &adapter->hw;
2868         struct tx_ring  *txr;
2869         struct rx_ring  *rxr;
2870         u32             txdctl, mhadd;
2871         u32             rxdctl, rxctrl;
2872         u32             ctrl_ext;
2873         int             err = 0;
2874
2875         mtx_assert(&adapter->core_mtx, MA_OWNED);
2876         INIT_DEBUGOUT("ixgbe_init_locked: begin");
2877
2878         hw->adapter_stopped = FALSE;
2879         ixgbe_stop_adapter(hw);
2880         callout_stop(&adapter->timer);
2881
2882         /* Queue indices may change with IOV mode */
2883         ixgbe_align_all_queue_indices(adapter);
2884
2885         /* reprogram the RAR[0] in case user changed it. */
2886         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
2887
2888         /* Get the latest mac address, User can use a LAA */
2889         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
2890         ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
2891         hw->addr_ctrl.rar_used_count = 1;
2892
2893         /* Set hardware offload abilities from ifnet flags */
2894         ixgbe_set_if_hwassist(adapter);
2895
2896         /* Prepare transmit descriptors and buffers */
2897         if (ixgbe_setup_transmit_structures(adapter)) {
2898                 device_printf(dev, "Could not setup transmit structures\n");
2899                 ixgbe_stop(adapter);
2900                 return;
2901         }
2902
2903         ixgbe_init_hw(hw);
2904         ixgbe_initialize_iov(adapter);
2905         ixgbe_initialize_transmit_units(adapter);
2906
2907         /* Setup Multicast table */
2908         ixgbe_set_multi(adapter);
2909
2910         /* Determine the correct mbuf pool, based on frame size */
2911         if (adapter->max_frame_size <= MCLBYTES)
2912                 adapter->rx_mbuf_sz = MCLBYTES;
2913         else
2914                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
2915
2916         /* Prepare receive descriptors and buffers */
2917         if (ixgbe_setup_receive_structures(adapter)) {
2918                 device_printf(dev, "Could not setup receive structures\n");
2919                 ixgbe_stop(adapter);
2920                 return;
2921         }
2922
2923         /* Configure RX settings */
2924         ixgbe_initialize_receive_units(adapter);
2925
2926         /* Initialize variable holding task enqueue requests
2927          * generated by interrupt handlers */
2928         adapter->task_requests = 0;
2929
2930         /* Enable SDP & MSI-X interrupts based on adapter */
2931         ixgbe_config_gpie(adapter);
2932
2933         /* Set MTU size */
2934         if (ifp->if_mtu > ETHERMTU) {
2935                 /* aka IXGBE_MAXFRS on 82599 and newer */
2936                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2937                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2938                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
2939                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2940         }
2941
2942         /* Now enable all the queues */
2943         for (int i = 0; i < adapter->num_queues; i++) {
2944                 txr = &adapter->tx_rings[i];
2945                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
2946                 txdctl |= IXGBE_TXDCTL_ENABLE;
2947                 /* Set WTHRESH to 8, burst writeback */
2948                 txdctl |= (8 << 16);
2949                 /*
2950                  * When the internal queue falls below PTHRESH (32),
2951                  * start prefetching as long as there are at least
2952                  * HTHRESH (1) buffers ready. The values are taken
2953                  * from the Intel linux driver 3.8.21.
2954                  * Prefetching enables tx line rate even with 1 queue.
2955                  */
2956                 txdctl |= (32 << 0) | (1 << 8);
2957                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
2958         }
2959
2960         for (int i = 0, j = 0; i < adapter->num_queues; i++) {
2961                 rxr = &adapter->rx_rings[i];
2962                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2963                 if (hw->mac.type == ixgbe_mac_82598EB) {
2964                         /*
2965                          * PTHRESH = 21
2966                          * HTHRESH = 4
2967                          * WTHRESH = 8
2968                          */
2969                         rxdctl &= ~0x3FFFFF;
2970                         rxdctl |= 0x080420;
2971                 }
2972                 rxdctl |= IXGBE_RXDCTL_ENABLE;
2973                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
2974                 for (; j < 10; j++) {
2975                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
2976                             IXGBE_RXDCTL_ENABLE)
2977                                 break;
2978                         else
2979                                 msec_delay(1);
2980                 }
2981                 wmb();
2982
2983                 /*
2984                  * In netmap mode, we must preserve the buffers made
2985                  * available to userspace before the if_init()
2986                  * (this is true by default on the TX side, because
2987                  * init makes all buffers available to userspace).
2988                  *
2989                  * netmap_reset() and the device specific routines
2990                  * (e.g. ixgbe_setup_receive_rings()) map these
2991                  * buffers at the end of the NIC ring, so here we
2992                  * must set the RDT (tail) register to make sure
2993                  * they are not overwritten.
2994                  *
2995                  * In this driver the NIC ring starts at RDH = 0,
2996                  * RDT points to the last slot available for reception (?),
2997                  * so RDT = num_rx_desc - 1 means the whole ring is available.
2998                  */
2999 #ifdef DEV_NETMAP
3000                 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
3001                     (ifp->if_capenable & IFCAP_NETMAP)) {
3002                         struct netmap_adapter *na = NA(adapter->ifp);
3003                         struct netmap_kring *kring = na->rx_rings[i];
3004                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
3005
3006                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
3007                 } else
3008 #endif /* DEV_NETMAP */
3009                         IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
3010                             adapter->num_rx_desc - 1);
3011         }
3012
3013         /* Enable Receive engine */
3014         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3015         if (hw->mac.type == ixgbe_mac_82598EB)
3016                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3017         rxctrl |= IXGBE_RXCTRL_RXEN;
3018         ixgbe_enable_rx_dma(hw, rxctrl);
3019
3020         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3021
3022         /* Set up MSI-X routing */
3023         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3024                 ixgbe_configure_ivars(adapter);
3025                 /* Set up auto-mask */
3026                 if (hw->mac.type == ixgbe_mac_82598EB)
3027                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3028                 else {
3029                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3030                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3031                 }
3032         } else {  /* Simple settings for Legacy/MSI */
3033                 ixgbe_set_ivar(adapter, 0, 0, 0);
3034                 ixgbe_set_ivar(adapter, 0, 0, 1);
3035                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3036         }
3037
3038         ixgbe_init_fdir(adapter);
3039
3040         /*
3041          * Check on any SFP devices that
3042          * need to be kick-started
3043          */
3044         if (hw->phy.type == ixgbe_phy_none) {
3045                 err = hw->phy.ops.identify(hw);
3046                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3047                         device_printf(dev,
3048                             "Unsupported SFP+ module type was detected.\n");
3049                         return;
3050                 }
3051         }
3052
3053         /* Set moderation on the Link interrupt */
3054         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
3055
3056         /* Config/Enable Link */
3057         ixgbe_config_link(adapter);
3058
3059         /* Hardware Packet Buffer & Flow Control setup */
3060         ixgbe_config_delay_values(adapter);
3061
3062         /* Initialize the FC settings */
3063         ixgbe_start_hw(hw);
3064
3065         /* Set up VLAN support and filter */
3066         ixgbe_setup_vlan_hw_support(adapter);
3067
3068         /* Setup DMA Coalescing */
3069         ixgbe_config_dmac(adapter);
3070
3071         /* And now turn on interrupts */
3072         ixgbe_enable_intr(adapter, false);
3073
3074         /* Enable the use of the MBX by the VF's */
3075         if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
3076                 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3077                 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3078                 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3079         }
3080
3081         /* Now inform the stack we're ready */
3082         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3083
3084         return;
3085 } /* ixgbe_init_locked */
3086
3087 /************************************************************************
3088  * ixgbe_init
3089  ************************************************************************/
3090 static void
3091 ixgbe_init(void *arg)
3092 {
3093         struct adapter *adapter = arg;
3094
3095         IXGBE_CORE_LOCK(adapter);
3096         ixgbe_init_locked(adapter);
3097         IXGBE_CORE_UNLOCK(adapter);
3098
3099         return;
3100 } /* ixgbe_init */
3101
3102 /************************************************************************
3103  * ixgbe_set_ivar
3104  *
3105  *   Setup the correct IVAR register for a particular MSI-X interrupt
3106  *     (yes this is all very magic and confusing :)
3107  *    - entry is the register array entry
3108  *    - vector is the MSI-X vector for this queue
3109  *    - type is RX/TX/MISC
3110  ************************************************************************/
3111 static void
3112 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3113 {
3114         struct ixgbe_hw *hw = &adapter->hw;
3115         u32 ivar, index;
3116
3117         vector |= IXGBE_IVAR_ALLOC_VAL;
3118
3119         switch (hw->mac.type) {
3120
3121         case ixgbe_mac_82598EB:
3122                 if (type == -1)
3123                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3124                 else
3125                         entry += (type * 64);
3126                 index = (entry >> 2) & 0x1F;
3127                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3128                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3129                 ivar |= (vector << (8 * (entry & 0x3)));
3130                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3131                 break;
3132
3133         case ixgbe_mac_82599EB:
3134         case ixgbe_mac_X540:
3135         case ixgbe_mac_X550:
3136         case ixgbe_mac_X550EM_x:
3137         case ixgbe_mac_X550EM_a:
3138                 if (type == -1) { /* MISC IVAR */
3139                         index = (entry & 1) * 8;
3140                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3141                         ivar &= ~(0xFF << index);
3142                         ivar |= (vector << index);
3143                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3144                 } else {          /* RX/TX IVARS */
3145                         index = (16 * (entry & 1)) + (8 * type);
3146                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3147                         ivar &= ~(0xFF << index);
3148                         ivar |= (vector << index);
3149                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3150                 }
3151                 break;
3152         default:
3153                 break;
3154         }
3155 } /* ixgbe_set_ivar */
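
/*
 * Worked example of the non-82598 mapping above (derived purely from the
 * arithmetic in this function): binding RX queue 3 (entry = 3, type = 0)
 * to MSI-X vector 5 gives
 *
 *     index = (16 * (3 & 1)) + (8 * 0) = 16;    bits 23:16
 *     register = IVAR(3 >> 1) = IVAR(1);
 *     byte written = 5 | IXGBE_IVAR_ALLOC_VAL;
 *
 * i.e. each IVAR register holds four 8-bit entries (RX and TX for an even
 * entry, then RX and TX for the following odd entry), and the allocation
 * bit in each byte marks the mapping as valid.
 */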
3156
3157 /************************************************************************
3158  * ixgbe_configure_ivars
3159  ************************************************************************/
3160 static void
3161 ixgbe_configure_ivars(struct adapter *adapter)
3162 {
3163         struct ix_queue *que = adapter->queues;
3164         u32             newitr;
3165
3166         if (ixgbe_max_interrupt_rate > 0)
3167                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3168         else {
3169                 /*
3170                  * Disable DMA coalescing if interrupt moderation is
3171                  * disabled.
3172                  */
3173                 adapter->dmac = 0;
3174                 newitr = 0;
3175         }
3176
3177         for (int i = 0; i < adapter->num_queues; i++, que++) {
3178                 struct rx_ring *rxr = &adapter->rx_rings[i];
3179                 struct tx_ring *txr = &adapter->tx_rings[i];
3180                 /* First the RX queue entry */
3181                 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
3182                 /* ... and the TX */
3183                 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
3184                 /* Set an Initial EITR value */
3185                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), newitr);
3186         }
3187
3188         /* For the Link interrupt */
3189         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3190 } /* ixgbe_configure_ivars */
3191
3192 /************************************************************************
3193  * ixgbe_config_gpie
3194  ************************************************************************/
3195 static void
3196 ixgbe_config_gpie(struct adapter *adapter)
3197 {
3198         struct ixgbe_hw *hw = &adapter->hw;
3199         u32             gpie;
3200
3201         gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3202
3203         if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
3204                 /* Enable Enhanced MSI-X mode */
3205                 gpie |= IXGBE_GPIE_MSIX_MODE
3206                      |  IXGBE_GPIE_EIAME
3207                      |  IXGBE_GPIE_PBA_SUPPORT
3208                      |  IXGBE_GPIE_OCD;
3209         }
3210
3211         /* Fan Failure Interrupt */
3212         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3213                 gpie |= IXGBE_SDP1_GPIEN;
3214
3215         /* Thermal Sensor Interrupt */
3216         if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
3217                 gpie |= IXGBE_SDP0_GPIEN_X540;
3218
3219         /* Link detection */
3220         switch (hw->mac.type) {
3221         case ixgbe_mac_82599EB:
3222                 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
3223                 break;
3224         case ixgbe_mac_X550EM_x:
3225         case ixgbe_mac_X550EM_a:
3226                 gpie |= IXGBE_SDP0_GPIEN_X540;
3227                 break;
3228         default:
3229                 break;
3230         }
3231
3232         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3233
3234         return;
3235 } /* ixgbe_config_gpie */
3236
3237 /************************************************************************
3238  * ixgbe_config_delay_values
3239  *
3240  *   Requires adapter->max_frame_size to be set.
3241  ************************************************************************/
3242 static void
3243 ixgbe_config_delay_values(struct adapter *adapter)
3244 {
3245         struct ixgbe_hw *hw = &adapter->hw;
3246         u32             rxpb, frame, size, tmp;
3247
3248         frame = adapter->max_frame_size;
3249
3250         /* Calculate High Water */
3251         switch (hw->mac.type) {
3252         case ixgbe_mac_X540:
3253         case ixgbe_mac_X550:
3254         case ixgbe_mac_X550EM_x:
3255         case ixgbe_mac_X550EM_a:
3256                 tmp = IXGBE_DV_X540(frame, frame);
3257                 break;
3258         default:
3259                 tmp = IXGBE_DV(frame, frame);
3260                 break;
3261         }
3262         size = IXGBE_BT2KB(tmp);
3263         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
3264         hw->fc.high_water[0] = rxpb - size;
3265
3266         /* Now calculate Low Water */
3267         switch (hw->mac.type) {
3268         case ixgbe_mac_X540:
3269         case ixgbe_mac_X550:
3270         case ixgbe_mac_X550EM_x:
3271         case ixgbe_mac_X550EM_a:
3272                 tmp = IXGBE_LOW_DV_X540(frame);
3273                 break;
3274         default:
3275                 tmp = IXGBE_LOW_DV(frame);
3276                 break;
3277         }
3278         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
3279
3280         hw->fc.pause_time = IXGBE_FC_PAUSE;
3281         hw->fc.send_xon = TRUE;
3282 } /* ixgbe_config_delay_values */
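
/*
 * Shape of the calculation above (no new constants, just the relationship,
 * assuming the usual XOFF-at-high-water / XON-at-low-water flow control
 * semantics): with the packet buffer size from RXPBSIZE(0) expressed in KB,
 *
 *     high_water[0] = RXPBSIZE_KB - BT2KB(DV(frame, frame));
 *     low_water[0]  = BT2KB(LOW_DV(frame));
 *
 * so a larger max_frame_size lowers the fill level at which an XOFF pause
 * is sent and raises the level at which traffic is resumed with XON.
 */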
3283
3284 /************************************************************************
3285  * ixgbe_set_multi - Multicast Update
3286  *
3287  *   Called whenever multicast address list is updated.
3288  ************************************************************************/
3289 static void
3290 ixgbe_set_multi(struct adapter *adapter)
3291 {
3292         struct ifmultiaddr   *ifma;
3293         struct ixgbe_mc_addr *mta;
3294         struct ifnet         *ifp = adapter->ifp;
3295         u8                   *update_ptr;
3296         int                  mcnt = 0;
3297         u32                  fctrl;
3298
3299         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
3300
3301         mta = adapter->mta;
3302         bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
3303
3304 #if __FreeBSD_version < 800000
3305         IF_ADDR_LOCK(ifp);
3306 #else
3307         if_maddr_rlock(ifp);
3308 #endif
3309         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3310                 if (ifma->ifma_addr->sa_family != AF_LINK)
3311                         continue;
3312                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3313                         break;
3314                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
3315                     mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
3316                 mta[mcnt].vmdq = adapter->pool;
3317                 mcnt++;
3318         }
3319 #if __FreeBSD_version < 800000
3320         IF_ADDR_UNLOCK(ifp);
3321 #else
3322         if_maddr_runlock(ifp);
3323 #endif
3324
3325         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3326         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3327         if (ifp->if_flags & IFF_PROMISC)
3328                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3329         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
3330             ifp->if_flags & IFF_ALLMULTI) {
3331                 fctrl |= IXGBE_FCTRL_MPE;
3332                 fctrl &= ~IXGBE_FCTRL_UPE;
3333         } else
3334                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3335
3336         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
3337
3338         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
3339                 update_ptr = (u8 *)mta;
3340                 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
3341                     ixgbe_mc_array_itr, TRUE);
3342         }
3343
3344         return;
3345 } /* ixgbe_set_multi */
3346
3347 /************************************************************************
3348  * ixgbe_mc_array_itr
3349  *
3350  *   An iterator function needed by the multicast shared code.
3351  *   It feeds the shared code routine the addresses in the
3352  *   array of ixgbe_set_multi() one by one.
3353  ************************************************************************/
3354 static u8 *
3355 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
3356 {
3357         struct ixgbe_mc_addr *mta;
3358
3359         mta = (struct ixgbe_mc_addr *)*update_ptr;
3360         *vmdq = mta->vmdq;
3361
3362         *update_ptr = (u8*)(mta + 1);
3363
3364         return (mta->addr);
3365 } /* ixgbe_mc_array_itr */
3366
3367 /************************************************************************
3368  * ixgbe_local_timer - Timer routine
3369  *
3370  *   Checks for link status, updates statistics,
3371  *   and runs the watchdog check.
3372  ************************************************************************/
3373 static void
3374 ixgbe_local_timer(void *arg)
3375 {
3376         struct adapter  *adapter = arg;
3377         device_t        dev = adapter->dev;
3378         struct ix_queue *que = adapter->queues;
3379         u64             queues = 0;
3380         int             hung = 0;
3381
3382         mtx_assert(&adapter->core_mtx, MA_OWNED);
3383
3384         /* Check for pluggable optics */
3385         if (adapter->sfp_probe)
3386                 if (!ixgbe_sfp_probe(adapter))
3387                         goto out; /* Nothing to do */
3388
3389         ixgbe_update_link_status(adapter);
3390         ixgbe_update_stats_counters(adapter);
3391
3392         /*
3393          * Check the TX queues status
3394          *      - mark hung queues so we don't schedule on them
3395          *      - watchdog only if all queues show hung
3396          */
3397         for (int i = 0; i < adapter->num_queues; i++, que++) {
3398                 /* Keep track of queues with work for soft irq */
3399                 if (que->txr->busy)
3400                         queues |= ((u64)1 << que->me);
3401                 /*
3402                  * Each time txeof runs without cleaning, while there
3403                  * are still uncleaned descriptors, it increments busy.
3404                  * Once busy reaches the MAX we declare the queue hung.
3405                  */
3406                 if (que->busy == IXGBE_QUEUE_HUNG) {
3407                         ++hung;
3408                         /* Mark the queue as inactive */
3409                         adapter->active_queues &= ~((u64)1 << que->me);
3410                         continue;
3411                 } else {
3412                         /* Check if we've come back from hung */
3413                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
3414                                 adapter->active_queues |= ((u64)1 << que->me);
3415                 }
3416                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
3417                         device_printf(dev,
3418                             "Warning: queue %d appears to be hung!\n", i);
3419                         que->txr->busy = IXGBE_QUEUE_HUNG;
3420                         ++hung;
3421                 }
3422         }
3423
3424         /* Only truly watchdog if all queues show hung */
3425         if (hung == adapter->num_queues)
3426                 goto watchdog;
3427         else if (queues != 0) { /* Force an IRQ on queues with work */
3428                 ixgbe_rearm_queues(adapter, queues);
3429         }
3430
3431 out:
3432         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
3433         return;
3434
3435 watchdog:
3436         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
3437         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3438         adapter->watchdog_events++;
3439         ixgbe_init_locked(adapter);
3440 } /* ixgbe_local_timer */
3441
3442 /************************************************************************
3443  * ixgbe_sfp_probe
3444  *
3445  *   Determine if a port had optics inserted.
3446  ************************************************************************/
3447 static bool
3448 ixgbe_sfp_probe(struct adapter *adapter)
3449 {
3450         struct ixgbe_hw *hw = &adapter->hw;
3451         device_t        dev = adapter->dev;
3452         bool            result = FALSE;
3453
3454         if ((hw->phy.type == ixgbe_phy_nl) &&
3455             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3456                 s32 ret = hw->phy.ops.identify_sfp(hw);
3457                 if (ret)
3458                         goto out;
3459                 ret = hw->phy.ops.reset(hw);
3460                 adapter->sfp_probe = FALSE;
3461                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3462                         device_printf(dev, "Unsupported SFP+ module detected!\n");
3463                         device_printf(dev,
3464                             "Reload driver with supported module.\n");
3465                         goto out;
3466                 } else
3467                         device_printf(dev, "SFP+ module detected!\n");
3468                 /* We now have supported optics */
3469                 result = TRUE;
3470         }
3471 out:
3472
3473         return (result);
3474 } /* ixgbe_sfp_probe */
3475
3476 /************************************************************************
3477  * ixgbe_handle_mod - Tasklet for SFP module interrupts
3478  ************************************************************************/
3479 static void
3480 ixgbe_handle_mod(void *context)
3481 {
3482         struct adapter  *adapter = context;
3483         struct ixgbe_hw *hw = &adapter->hw;
3484         device_t        dev = adapter->dev;
3485         u32             err, cage_full = 0;
3486
3487         if (adapter->hw.need_crosstalk_fix) {
3488                 switch (hw->mac.type) {
3489                 case ixgbe_mac_82599EB:
3490                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3491                             IXGBE_ESDP_SDP2;
3492                         break;
3493                 case ixgbe_mac_X550EM_x:
3494                 case ixgbe_mac_X550EM_a:
3495                         cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
3496                             IXGBE_ESDP_SDP0;
3497                         break;
3498                 default:
3499                         break;
3500                 }
3501
3502                 if (!cage_full)
3503                         return;
3504         }
3505
3506         err = hw->phy.ops.identify_sfp(hw);
3507         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3508                 device_printf(dev,
3509                     "Unsupported SFP+ module type was detected.\n");
3510                 goto handle_mod_out;
3511         }
3512
3513         err = hw->mac.ops.setup_sfp(hw);
3514         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3515                 device_printf(dev,
3516                     "Setup failure - unsupported SFP+ module type.\n");
3517                 goto handle_mod_out;
3518         }
3519         adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3520         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3521         return;
3522
3523 handle_mod_out:
3524         adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF);
3525 } /* ixgbe_handle_mod */
3526
3527
3528 /************************************************************************
3529  * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
3530  ************************************************************************/
3531 static void
3532 ixgbe_handle_msf(void *context)
3533 {
3534         struct adapter  *adapter = context;
3535         struct ixgbe_hw *hw = &adapter->hw;
3536         u32             autoneg;
3537         bool            negotiate;
3538
3539         /* get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
3540         adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
3541
3542         autoneg = hw->phy.autoneg_advertised;
3543         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3544                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3545         if (hw->mac.ops.setup_link)
3546                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3547
3548         /* Adjust media types shown in ifconfig */
3549         ifmedia_removeall(&adapter->media);
3550         ixgbe_add_media_types(adapter);
3551         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3552 } /* ixgbe_handle_msf */
3553
3554 /************************************************************************
3555  * ixgbe_handle_phy - Tasklet for external PHY interrupts
3556  ************************************************************************/
3557 static void
3558 ixgbe_handle_phy(void *context)
3559 {
3560         struct adapter  *adapter = context;
3561         struct ixgbe_hw *hw = &adapter->hw;
3562         int             error;
3563
3564         error = hw->phy.ops.handle_lasi(hw);
3565         if (error == IXGBE_ERR_OVERTEMP)
3566                 device_printf(adapter->dev,
                         "CRITICAL: EXTERNAL PHY OVER TEMP!! "
                         "PHY will downshift to lower power state!\n");
3567         else if (error)
3568                 device_printf(adapter->dev,
3569                     "Error handling LASI interrupt: %d\n", error);
3570 } /* ixgbe_handle_phy */
3571
3572 /************************************************************************
3573  * ixgbe_handle_admin_task - Handler for interrupt tasklets meant to be
3574  *     called in separate task.
3575  ************************************************************************/
3576 static void
3577 ixgbe_handle_admin_task(void *context, int pending)
3578 {
3579         struct adapter  *adapter = context;
3580
3581         IXGBE_CORE_LOCK(adapter);
3582         ixgbe_disable_intr(adapter, true);
3583
3584         if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD)
3585                 ixgbe_handle_mod(adapter);
3586         if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF)
3587                 ixgbe_handle_msf(adapter);
3588         if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX)
3589                 ixgbe_handle_mbx(adapter);
3590         if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR)
3591                 ixgbe_reinit_fdir(adapter);
3592         if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY)
3593                 ixgbe_handle_phy(adapter);
3594         if (adapter->task_requests & IXGBE_REQUEST_TASK_LINK)
3595                 ixgbe_handle_link(adapter);
3596         adapter->task_requests = 0;
3597
3598         ixgbe_enable_intr(adapter, true);
3599         IXGBE_CORE_UNLOCK(adapter);
3600 } /* ixgbe_handle_admin_task */
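
/*
 * Usage sketch: the interrupt handlers in this file do not call the
 * ixgbe_handle_* routines directly.  They record the work in
 * adapter->task_requests and kick the admin task, e.g.
 *
 *     adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
 *     taskqueue_enqueue(adapter->tq, &adapter->admin_task);
 *
 * ixgbe_handle_admin_task() then runs every requested handler under the
 * core lock, with only the non-queue interrupts masked, and clears the
 * request bitmap before re-enabling them.
 */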
3601
3602 /************************************************************************
3603  * ixgbe_stop - Stop the hardware
3604  *
3605  *   Disables all traffic on the adapter by issuing a
3606  *   global reset on the MAC and deallocates TX/RX buffers.
3607  ************************************************************************/
3608 static void
3609 ixgbe_stop(void *arg)
3610 {
3611         struct ifnet    *ifp;
3612         struct adapter  *adapter = arg;
3613         struct ixgbe_hw *hw = &adapter->hw;
3614
3615         ifp = adapter->ifp;
3616
3617         mtx_assert(&adapter->core_mtx, MA_OWNED);
3618
3619         INIT_DEBUGOUT("ixgbe_stop: begin\n");
3620         ixgbe_disable_intr(adapter, false);
3621         callout_stop(&adapter->timer);
3622
3623         /* Let the stack know...*/
3624         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3625
3626         ixgbe_reset_hw(hw);
3627         hw->adapter_stopped = FALSE;
3628         ixgbe_stop_adapter(hw);
3629         if (hw->mac.type == ixgbe_mac_82599EB)
3630                 ixgbe_stop_mac_link_on_d3_82599(hw);
3631         /* Turn off the laser - noop with no optics */
3632         ixgbe_disable_tx_laser(hw);
3633
3634         /* Update the stack */
3635         adapter->link_up = FALSE;
3636         ixgbe_update_link_status(adapter);
3637
3638         /* reprogram the RAR[0] in case user changed it. */
3639         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3640
3641         return;
3642 } /* ixgbe_stop */
3643
3644 /************************************************************************
3645  * ixgbe_update_link_status - Update OS on link state
3646  *
3647  * Note: Only updates the OS on the cached link state.
3648  *       The real check of the hardware only happens with
3649  *       a link interrupt.
3650  ************************************************************************/
3651 static void
3652 ixgbe_update_link_status(struct adapter *adapter)
3653 {
3654         struct ifnet *ifp = adapter->ifp;
3655         device_t     dev = adapter->dev;
3656
3657         if (adapter->link_up) {
3658                 if (adapter->link_active == FALSE) {
3659                         if (bootverbose)
3660                                 device_printf(dev, "Link is up %d Gbps %s\n",
3661                                     ((adapter->link_speed == 128) ? 10 : 1),
3662                                     "Full Duplex");
3663                         adapter->link_active = TRUE;
3664                         /* Update any Flow Control changes */
3665                         ixgbe_fc_enable(&adapter->hw);
3666                         /* Update DMA coalescing config */
3667                         ixgbe_config_dmac(adapter);
3668                         if_link_state_change(ifp, LINK_STATE_UP);
3669                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3670                                 ixgbe_ping_all_vfs(adapter);
3671                 }
3672         } else { /* Link down */
3673                 if (adapter->link_active == TRUE) {
3674                         if (bootverbose)
3675                                 device_printf(dev, "Link is Down\n");
3676                         if_link_state_change(ifp, LINK_STATE_DOWN);
3677                         adapter->link_active = FALSE;
3678                         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3679                                 ixgbe_ping_all_vfs(adapter);
3680                 }
3681         }
3682
3683         return;
3684 } /* ixgbe_update_link_status */
3685
3686 /************************************************************************
3687  * ixgbe_config_dmac - Configure DMA Coalescing
3688  ************************************************************************/
3689 static void
3690 ixgbe_config_dmac(struct adapter *adapter)
3691 {
3692         struct ixgbe_hw          *hw = &adapter->hw;
3693         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3694
3695         if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
3696                 return;
3697
3698         if (dcfg->watchdog_timer ^ adapter->dmac ||
3699             dcfg->link_speed ^ adapter->link_speed) {
3700                 dcfg->watchdog_timer = adapter->dmac;
3701                 dcfg->fcoe_en = false;
3702                 dcfg->link_speed = adapter->link_speed;
3703                 dcfg->num_tcs = 1;
3704
3705                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3706                     dcfg->watchdog_timer, dcfg->link_speed);
3707
3708                 hw->mac.ops.dmac_config(hw);
3709         }
3710 } /* ixgbe_config_dmac */
3711
3712 /************************************************************************
3713  * ixgbe_enable_intr
3714  *     If skip_traffic parameter is set, queues' irqs are not enabled.
3715  *     If the skip_traffic parameter is set, the queue interrupts are left
3716  *     untouched. This is useful when re-enabling interrupts after they were
3717  *     disabled with ixgbe_disable_intr()'s 'keep_traffic' parameter set to
3718  *     true, since the queue interrupts were never disabled.
3719 static void
3720 ixgbe_enable_intr(struct adapter *adapter, bool skip_traffic)
3721 {
3722         struct ixgbe_hw *hw = &adapter->hw;
3723         struct ix_queue *que = adapter->queues;
3724         u32             mask, fwsm;
3725
3726         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3727
3728         switch (adapter->hw.mac.type) {
3729         case ixgbe_mac_82599EB:
3730                 mask |= IXGBE_EIMS_ECC;
3731                 /* Temperature sensor on some adapters */
3732                 mask |= IXGBE_EIMS_GPI_SDP0;
3733                 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3734                 mask |= IXGBE_EIMS_GPI_SDP1;
3735                 mask |= IXGBE_EIMS_GPI_SDP2;
3736                 break;
3737         case ixgbe_mac_X540:
3738                 /* Detect if Thermal Sensor is enabled */
3739                 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3740                 if (fwsm & IXGBE_FWSM_TS_ENABLED)
3741                         mask |= IXGBE_EIMS_TS;
3742                 mask |= IXGBE_EIMS_ECC;
3743                 break;
3744         case ixgbe_mac_X550:
3745                 /* MAC thermal sensor is automatically enabled */
3746                 mask |= IXGBE_EIMS_TS;
3747                 mask |= IXGBE_EIMS_ECC;
3748                 break;
3749         case ixgbe_mac_X550EM_x:
3750         case ixgbe_mac_X550EM_a:
3751                 /* Some devices use SDP0 for important information */
3752                 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3753                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
3754                     hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
3755                     hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3756                         mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3757                 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
3758                         mask |= IXGBE_EICR_GPI_SDP0_X540;
3759                 mask |= IXGBE_EIMS_ECC;
3760                 break;
3761         default:
3762                 break;
3763         }
3764
3765         /* Enable Fan Failure detection */
3766         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
3767                 mask |= IXGBE_EIMS_GPI_SDP1;
3768         /* Enable SR-IOV */
3769         if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
3770                 mask |= IXGBE_EIMS_MAILBOX;
3771         /* Enable Flow Director */
3772         if (adapter->feat_en & IXGBE_FEATURE_FDIR)
3773                 mask |= IXGBE_EIMS_FLOW_DIR;
3774
3775         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3776
3777         /* With MSI-X we use auto clear */
3778         if (adapter->msix_mem) {
3779                 mask = IXGBE_EIMS_ENABLE_MASK;
3780                 /* Don't autoclear Link */
3781                 mask &= ~IXGBE_EIMS_OTHER;
3782                 mask &= ~IXGBE_EIMS_LSC;
3783                 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
3784                         mask &= ~IXGBE_EIMS_MAILBOX;
3785                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3786         }
3787
3788         if (!skip_traffic) {
3789                 /*
3790                  * Now enable all queues, this is done separately to
3791                  * allow for handling the extended (beyond 32) MSI-X
3792                  * vectors that can be used by 82599
3793                  */
3794                 for (int i = 0; i < adapter->num_queues; i++, que++)
3795                         ixgbe_enable_queue(adapter, que->msix);
3796         }
3797
3798         IXGBE_WRITE_FLUSH(hw);
3799
3800         return;
3801 } /* ixgbe_enable_intr */
3802
3803 /************************************************************************
3804  * ixgbe_disable_intr
3805  *     If the keep_traffic parameter is set, the queue interrupts are not
3806  *     disabled. This is needed by ixgbe_handle_admin_task() to run the
3807  *     link-specific interrupt handlers without stopping traffic.
3808  ************************************************************************/
3809 static void
3810 ixgbe_disable_intr(struct adapter *adapter, bool keep_traffic)
3811 {
3812         struct ixgbe_hw *hw = &adapter->hw;
3813         u32 eiac_mask, eimc_mask, eimc_ext_mask;
3814
3815         if (keep_traffic) {
3816                 /* Autoclear only queue irqs */
3817                 eiac_mask = IXGBE_EICR_RTX_QUEUE;
3818
3819                 /* Disable everything but queue irqs */
3820                 eimc_mask = ~0;
3821                 eimc_mask &= ~IXGBE_EIMC_RTX_QUEUE;
3822                 eimc_ext_mask = 0;
3823         } else {
3824                 eiac_mask = 0;
3825                 eimc_mask = (hw->mac.type == ixgbe_mac_82598EB) ? ~0 : 0xFFFF0000;
3826                 eimc_ext_mask = ~0;
3827         }
3828
3829         if (adapter->msix_mem)
3830                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac_mask);
3831
3832         IXGBE_WRITE_REG(hw, IXGBE_EIMC, eimc_mask);
3833         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), eimc_ext_mask);
3834         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), eimc_ext_mask);
3835
3836         IXGBE_WRITE_FLUSH(hw);
3837
3838         return;
3839 } /* ixgbe_disable_intr */
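
/*
 * Pairing sketch for the two helpers above, as used by
 * ixgbe_handle_admin_task(): link and module work runs with the queue
 * interrupts left enabled on both sides of the critical section.
 *
 *     ixgbe_disable_intr(adapter, true);    mask all but the queue irqs
 *     ... link / SFP / mailbox handling ...
 *     ixgbe_enable_intr(adapter, true);     re-enable, skip the queue irqs
 */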
3840
3841 /************************************************************************
3842  * ixgbe_legacy_irq - Legacy Interrupt Service routine
3843  ************************************************************************/
3844 static void
3845 ixgbe_legacy_irq(void *arg)
3846 {
3847         struct ix_queue *que = arg;
3848         struct adapter  *adapter = que->adapter;
3849         struct ixgbe_hw *hw = &adapter->hw;
3850         struct ifnet    *ifp = adapter->ifp;
3851         struct tx_ring  *txr = adapter->tx_rings;
3852         bool            more = false;
3853         u32             eicr, eicr_mask;
3854
3855         /* Silicon errata #26 on 82598 */
3856         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
3857
3858         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3859
3860         ++que->irqs;
3861         if (eicr == 0) {
3862                 ixgbe_enable_intr(adapter, false);
3863                 return;
3864         }
3865
3866         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3867                 more = ixgbe_rxeof(que);
3868
3869                 IXGBE_TX_LOCK(txr);
3870                 ixgbe_txeof(txr);
3871                 if (!ixgbe_ring_empty(ifp, txr->br))
3872                         ixgbe_start_locked(ifp, txr);
3873                 IXGBE_TX_UNLOCK(txr);
3874         }
3875
3876         /* Check for fan failure */
3877         if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3878                 ixgbe_check_fan_failure(adapter, eicr, true);
3879                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3880         }
3881
3882         /* Link status change */
3883         if (eicr & IXGBE_EICR_LSC) {
3884                 adapter->task_requests |= IXGBE_REQUEST_TASK_LINK;
3885                 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3886         }
3887
3888         if (ixgbe_is_sfp(hw)) {
3889                 /* Pluggable optics-related interrupt */
3890                 if (hw->mac.type >= ixgbe_mac_X540)
3891                         eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3892                 else
3893                         eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3894
3895                 if (eicr & eicr_mask) {
3896                         IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3897                         adapter->task_requests |= IXGBE_REQUEST_TASK_MOD;
3898                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3899                 }
3900
3901                 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3902                     (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3903                         IXGBE_WRITE_REG(hw, IXGBE_EICR,
3904                             IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3905                         adapter->task_requests |= IXGBE_REQUEST_TASK_MSF;
3906                         taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3907                 }
3908         }
3909
3910         /* External PHY interrupt */
3911         if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3912             (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3913                 adapter->task_requests |= IXGBE_REQUEST_TASK_PHY;
3914                 taskqueue_enqueue(adapter->tq, &adapter->admin_task);
3915         }
3916
3917         if (more)
3918                 taskqueue_enqueue(que->tq, &que->que_task);
3919         else
3920                 ixgbe_enable_intr(adapter, false);
3921
3922         return;
3923 } /* ixgbe_legacy_irq */
3924
3925 /************************************************************************
3926  * ixgbe_free_pci_resources
3927  ************************************************************************/
3928 static void
3929 ixgbe_free_pci_resources(struct adapter *adapter)
3930 {
3931         struct ix_queue *que = adapter->queues;
3932         device_t        dev = adapter->dev;
3933         int             rid, memrid;
3934
3935         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3936                 memrid = PCIR_BAR(MSIX_82598_BAR);
3937         else
3938                 memrid = PCIR_BAR(MSIX_82599_BAR);
3939
3940         /*
3941          * There is a slight possibility of a failure mode
3942          * in attach that results in entering this function
3943          * before interrupt resources have been initialized, and
3944          * in that case we do not want to execute the loops below.
3945          * We can detect this reliably by the state of the adapter's
3946          * res pointer.
3947          */
3948         if (adapter->res == NULL)
3949                 goto mem;
3950
3951         /*
3952          * Release all MSI-X queue resources.
3953          */
3954         for (int i = 0; i < adapter->num_queues; i++, que++) {
3955                 rid = que->msix + 1;
3956                 if (que->tag != NULL) {
3957                         bus_teardown_intr(dev, que->res, que->tag);
3958                         que->tag = NULL;
3959                 }
3960                 if (que->res != NULL)
3961                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
3962         }
3963
3964
3965         if (adapter->tag != NULL) {
3966                 bus_teardown_intr(dev, adapter->res, adapter->tag);
3967                 adapter->tag = NULL;
3968         }
3969
3970         /* Clean the Legacy or Link interrupt last */
3971         if (adapter->res != NULL)
3972                 bus_release_resource(dev, SYS_RES_IRQ, adapter->link_rid,
3973                     adapter->res);
3974
3975 mem:
3976         if ((adapter->feat_en & IXGBE_FEATURE_MSI) ||
3977             (adapter->feat_en & IXGBE_FEATURE_MSIX))
3978                 pci_release_msi(dev);
3979
3980         if (adapter->msix_mem != NULL)
3981                 bus_release_resource(dev, SYS_RES_MEMORY, memrid,
3982                     adapter->msix_mem);
3983
3984         if (adapter->pci_mem != NULL)
3985                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3986                     adapter->pci_mem);
3987
3988         return;
3989 } /* ixgbe_free_pci_resources */
3990
3991 /************************************************************************
3992  * ixgbe_set_sysctl_value
3993  ************************************************************************/
3994 static void
3995 ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
3996     const char *description, int *limit, int value)
3997 {
3998         *limit = value;
3999         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4000             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4001             OID_AUTO, name, CTLFLAG_RW, limit, value, description);
4002 } /* ixgbe_set_sysctl_value */
4003
4004 /************************************************************************
4005  * ixgbe_sysctl_flowcntl
4006  *
4007  *   SYSCTL wrapper around setting Flow Control
4008  ************************************************************************/
4009 static int
4010 ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
4011 {
4012         struct adapter *adapter;
4013         int            error, fc;
4014
4015         adapter = (struct adapter *)arg1;
4016         fc = adapter->hw.fc.current_mode;
4017
4018         error = sysctl_handle_int(oidp, &fc, 0, req);
4019         if ((error) || (req->newptr == NULL))
4020                 return (error);
4021
4022         /* Don't bother if it's not changed */
4023         if (fc == adapter->hw.fc.current_mode)
4024                 return (0);
4025
4026         return ixgbe_set_flowcntl(adapter, fc);
4027 } /* ixgbe_sysctl_flowcntl */
4028
4029 /************************************************************************
4030  * ixgbe_set_flowcntl - Set flow control
4031  *
4032  *   Flow control values:
4033  *     0 - off
4034  *     1 - rx pause
4035  *     2 - tx pause
4036  *     3 - full
4037  ************************************************************************/
4038 static int
4039 ixgbe_set_flowcntl(struct adapter *adapter, int fc)
4040 {
4041         switch (fc) {
4042         case ixgbe_fc_rx_pause:
4043         case ixgbe_fc_tx_pause:
4044         case ixgbe_fc_full:
4045                 adapter->hw.fc.requested_mode = fc;
4046                 if (adapter->num_queues > 1)
4047                         ixgbe_disable_rx_drop(adapter);
4048                 break;
4049         case ixgbe_fc_none:
4050                 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4051                 if (adapter->num_queues > 1)
4052                         ixgbe_enable_rx_drop(adapter);
4053                 break;
4054         default:
4055                 return (EINVAL);
4056         }
4057
4058         /* Don't autoneg if forcing a value */
4059         adapter->hw.fc.disable_fc_autoneg = TRUE;
4060         ixgbe_fc_enable(&adapter->hw);
4061
4062         return (0);
4063 } /* ixgbe_set_flowcntl */
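
/*
 * Usage sketch (illustrative only): ixgbe_sysctl_flowcntl() above feeds
 * this routine.  Assuming the node is published as dev.ix.<unit>.fc
 * (registration is not shown in this section), full flow control could
 * be requested from userland with sysctlbyname(3):
 *
 *   #include <sys/types.h>
 *   #include <sys/sysctl.h>
 *
 *   int fc = 3;    // 3 = full (rx + tx pause); 0 = off, 1 = rx, 2 = tx
 *   sysctlbyname("dev.ix.0.fc", NULL, NULL, &fc, sizeof(fc));
 *
 * Setting 0 on a multiqueue configuration also re-enables per-queue RX
 * drop via ixgbe_enable_rx_drop() below.
 */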
4064
4065 /************************************************************************
4066  * ixgbe_enable_rx_drop
4067  *
4068  *   Enable the hardware to drop packets when the buffer is
4069  *   full. This is useful with multiqueue, so that a single
4070  *   full queue does not stall the entire RX engine. We only
4071  *   enable this when multiqueue is in use AND flow control
4072  *   is disabled.
4073  ************************************************************************/
4074 static void
4075 ixgbe_enable_rx_drop(struct adapter *adapter)
4076 {
4077         struct ixgbe_hw *hw = &adapter->hw;
4078         struct rx_ring  *rxr;
4079         u32             srrctl;
4080
4081         for (int i = 0; i < adapter->num_queues; i++) {
4082                 rxr = &adapter->rx_rings[i];
4083                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4084                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4085                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4086         }
4087
4088         /* enable drop for each vf */
4089         for (int i = 0; i < adapter->num_vfs; i++) {
4090                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4091                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
4092                     IXGBE_QDE_ENABLE));
4093         }
4094 } /* ixgbe_enable_rx_drop */
4095
4096 /************************************************************************
4097  * ixgbe_disable_rx_drop
4098  ************************************************************************/
4099 static void
4100 ixgbe_disable_rx_drop(struct adapter *adapter)
4101 {
4102         struct ixgbe_hw *hw = &adapter->hw;
4103         struct rx_ring  *rxr;
4104         u32             srrctl;
4105
4106         for (int i = 0; i < adapter->num_queues; i++) {
4107                 rxr = &adapter->rx_rings[i];
4108                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
4109                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4110                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
4111         }
4112
4113         /* disable drop for each vf */
4114         for (int i = 0; i < adapter->num_vfs; i++) {
4115                 IXGBE_WRITE_REG(hw, IXGBE_QDE,
4116                     (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
4117         }
4118 } /* ixgbe_disable_rx_drop */
4119
4120 /************************************************************************
4121  * ixgbe_sysctl_advertise
4122  *
4123  *   SYSCTL wrapper around setting advertised speed
4124  ************************************************************************/
4125 static int
4126 ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
4127 {
4128         struct adapter *adapter;
4129         int            error, advertise;
4130
4131         adapter = (struct adapter *)arg1;
4132         advertise = adapter->advertise;
4133
4134         error = sysctl_handle_int(oidp, &advertise, 0, req);
4135         if ((error) || (req->newptr == NULL))
4136                 return (error);
4137
4138         return ixgbe_set_advertise(adapter, advertise);
4139 } /* ixgbe_sysctl_advertise */
4140
4141 /************************************************************************
4142  * ixgbe_set_advertise - Control advertised link speed
4143  *
4144  *   Flags:
4145  *     0x1 - advertise 100 Mb
4146  *     0x2 - advertise 1G
4147  *     0x4 - advertise 10G
4148  *     0x8 - advertise 10 Mb (yes, Mb)
4149  ************************************************************************/
4150 static int
4151 ixgbe_set_advertise(struct adapter *adapter, int advertise)
4152 {
4153         device_t         dev;
4154         struct ixgbe_hw  *hw;
4155         ixgbe_link_speed speed = 0;
4156         ixgbe_link_speed link_caps = 0;
4157         s32              err = IXGBE_NOT_IMPLEMENTED;
4158         bool             negotiate = FALSE;
4159
4160         /* Checks to validate new value */
4161         if (adapter->advertise == advertise) /* no change */
4162                 return (0);
4163
4164         dev = adapter->dev;
4165         hw = &adapter->hw;
4166
4167         /* No speed changes for backplane media */
4168         if (hw->phy.media_type == ixgbe_media_type_backplane)
4169                 return (ENODEV);
4170
4171         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4172               (hw->phy.multispeed_fiber))) {
4173                 device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
4174                 return (EINVAL);
4175         }
4176
4177         if (advertise < 0x1 || advertise > 0xF) {
4178                 device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n");
4179                 return (EINVAL);
4180         }
4181
4182         if (hw->mac.ops.get_link_capabilities) {
4183                 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
4184                     &negotiate);
4185                 if (err != IXGBE_SUCCESS) {
4186                         device_printf(dev, "Unable to determine supported advertise speeds\n");
4187                         return (ENODEV);
4188                 }
4189         }
4190
4191         /* Set new value and report new advertised mode */
4192         if (advertise & 0x1) {
4193                 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
4194                         device_printf(dev, "Interface does not support 100Mb advertised speed\n");
4195                         return (EINVAL);
4196                 }
4197                 speed |= IXGBE_LINK_SPEED_100_FULL;
4198         }
4199         if (advertise & 0x2) {
4200                 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
4201                         device_printf(dev, "Interface does not support 1Gb advertised speed\n");
4202                         return (EINVAL);
4203                 }
4204                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4205         }
4206         if (advertise & 0x4) {
4207                 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
4208                         device_printf(dev, "Interface does not support 10Gb advertised speed\n");
4209                         return (EINVAL);
4210                 }
4211                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4212         }
4213         if (advertise & 0x8) {
4214                 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
4215                         device_printf(dev, "Interface does not support 10Mb advertised speed\n");
4216                         return (EINVAL);
4217                 }
4218                 speed |= IXGBE_LINK_SPEED_10_FULL;
4219         }
4220
4221         hw->mac.autotry_restart = TRUE;
4222         hw->mac.ops.setup_link(hw, speed, TRUE);
4223         adapter->advertise = advertise;
4224
4225         return (0);
4226 } /* ixgbe_set_advertise */
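
/*
 * Usage sketch: the advertise flags above form a bitmask, so speeds can
 * be combined.  Assuming the sysctl node is dev.ix.<unit>.advertise_speed
 * (registration not shown here), advertising 1G and 10G only would be:
 *
 *   int adv = 0x2 | 0x4;   // 1G + 10G; 0x1 adds 100Mb, 0x8 adds 10Mb
 *   sysctlbyname("dev.ix.0.advertise_speed", NULL, NULL, &adv, sizeof(adv));
 *
 * Values outside 0x1-0xF, backplane media, or speeds the link does not
 * support are rejected as checked above.
 */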
4227
4228 /************************************************************************
4229  * ixgbe_get_advertise - Get current advertised speed settings
4230  *
4231  *   Formatted for sysctl usage.
4232  *   Flags:
4233  *     0x1 - advertise 100 Mb
4234  *     0x2 - advertise 1G
4235  *     0x4 - advertise 10G
4236  *     0x8 - advertise 10 Mb (yes, Mb)
4237  ************************************************************************/
4238 static int
4239 ixgbe_get_advertise(struct adapter *adapter)
4240 {
4241         struct ixgbe_hw  *hw = &adapter->hw;
4242         int              speed;
4243         ixgbe_link_speed link_caps = 0;
4244         s32              err;
4245         bool             negotiate = FALSE;
4246
4247         /*
4248          * Advertised speed means nothing unless it's copper or
4249          * multi-speed fiber
4250          */
4251         if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
4252             !(hw->phy.multispeed_fiber))
4253                 return (0);
4254
4255         err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
4256         if (err != IXGBE_SUCCESS)
4257                 return (0);
4258
4259         speed =
4260             ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) |
4261             ((link_caps & IXGBE_LINK_SPEED_1GB_FULL)  ? 2 : 0) |
4262             ((link_caps & IXGBE_LINK_SPEED_100_FULL)  ? 1 : 0) |
4263             ((link_caps & IXGBE_LINK_SPEED_10_FULL)   ? 8 : 0);
4264
4265         return speed;
4266 } /* ixgbe_get_advertise */
4267
4268 /************************************************************************
4269  * ixgbe_sysctl_dmac - Manage DMA Coalescing
4270  *
4271  *   Control values:
4272  *     0/1 - off / on (use default value of 1000)
4273  *
4274  *     Legal timer values are:
4275  *     50,100,250,500,1000,2000,5000,10000
4276  *
4277  *     Turning off interrupt moderation will also turn this off.
4278  ************************************************************************/
4279 static int
4280 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4281 {
4282         struct adapter *adapter = (struct adapter *)arg1;
4283         struct ifnet   *ifp = adapter->ifp;
4284         int            error;
4285         u32            newval;
4286
4287         newval = adapter->dmac;
4288         error = sysctl_handle_int(oidp, &newval, 0, req);
4289         if ((error) || (req->newptr == NULL))
4290                 return (error);
4291
4292         switch (newval) {
4293         case 0:
4294                 /* Disabled */
4295                 adapter->dmac = 0;
4296                 break;
4297         case 1:
4298                 /* Enable and use default */
4299                 adapter->dmac = 1000;
4300                 break;
4301         case 50:
4302         case 100:
4303         case 250:
4304         case 500:
4305         case 1000:
4306         case 2000:
4307         case 5000:
4308         case 10000:
4309                 /* Legal values - allow */
4310                 adapter->dmac = newval;
4311                 break;
4312         default:
4313                 /* Do nothing, illegal value */
4314                 return (EINVAL);
4315         }
4316
4317         /* Re-initialize hardware if it's already running */
4318         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4319                 ixgbe_init(adapter);
4320
4321         return (0);
4322 } /* ixgbe_sysctl_dmac */
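
/*
 * Usage sketch: only the values enumerated above are accepted.  Assuming
 * the node is dev.ix.<unit>.dmac, for example:
 *
 *   int dmac = 250;   // must be 0, 1, or one of the legal timer values
 *   sysctlbyname("dev.ix.0.dmac", NULL, NULL, &dmac, sizeof(dmac));
 *
 * Writing 1 selects the default of 1000, 0 disables DMA coalescing, and
 * any other value (e.g. 300) fails with EINVAL.  If the interface is
 * running, the handler re-runs ixgbe_init() so the setting takes effect.
 */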
4323
4324 #ifdef IXGBE_DEBUG
4325 /************************************************************************
4326  * ixgbe_sysctl_power_state
4327  *
4328  *   Sysctl to test power states
4329  *   Values:
4330  *     0      - set device to D0
4331  *     3      - set device to D3
4332  *     (none) - get current device power state
4333  ************************************************************************/
4334 static int
4335 ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
4336 {
4337         struct adapter *adapter = (struct adapter *)arg1;
4338         device_t       dev = adapter->dev;
4339         int            curr_ps, new_ps, error = 0;
4340
4341         curr_ps = new_ps = pci_get_powerstate(dev);
4342
4343         error = sysctl_handle_int(oidp, &new_ps, 0, req);
4344         if ((error) || (req->newptr == NULL))
4345                 return (error);
4346
4347         if (new_ps == curr_ps)
4348                 return (0);
4349
4350         if (new_ps == 3 && curr_ps == 0)
4351                 error = DEVICE_SUSPEND(dev);
4352         else if (new_ps == 0 && curr_ps == 3)
4353                 error = DEVICE_RESUME(dev);
4354         else
4355                 return (EINVAL);
4356
4357         device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
4358
4359         return (error);
4360 } /* ixgbe_sysctl_power_state */
4361 #endif
4362
4363 /************************************************************************
4364  * ixgbe_sysctl_wol_enable
4365  *
4366  *   Sysctl to enable/disable the WoL capability,
4367  *   if supported by the adapter.
4368  *
4369  *   Values:
4370  *     0 - disabled
4371  *     1 - enabled
4372  ************************************************************************/
4373 static int
4374 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4375 {
4376         struct adapter  *adapter = (struct adapter *)arg1;
4377         struct ixgbe_hw *hw = &adapter->hw;
4378         int             new_wol_enabled;
4379         int             error = 0;
4380
4381         new_wol_enabled = hw->wol_enabled;
4382         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4383         if ((error) || (req->newptr == NULL))
4384                 return (error);
4385         new_wol_enabled = !!(new_wol_enabled);
4386         if (new_wol_enabled == hw->wol_enabled)
4387                 return (0);
4388
4389         if (new_wol_enabled > 0 && !adapter->wol_support)
4390                 return (ENODEV);
4391         else
4392                 hw->wol_enabled = new_wol_enabled;
4393
4394         return (0);
4395 } /* ixgbe_sysctl_wol_enable */
4396
4397 /************************************************************************
4398  * ixgbe_sysctl_wufc - Wake Up Filter Control
4399  *
4400  *   Sysctl to select which types of received packets will
4401  *   wake the adapter.
4402  *   Flags:
4403  *     0x1  - Link Status Change
4404  *     0x2  - Magic Packet
4405  *     0x4  - Direct Exact
4406  *     0x8  - Directed Multicast
4407  *     0x10 - Broadcast
4408  *     0x20 - ARP/IPv4 Request Packet
4409  *     0x40 - Direct IPv4 Packet
4410  *     0x80 - Direct IPv6 Packet
4411  *
4412  *   Settings not listed above will cause the sysctl to return an error.
4413  ************************************************************************/
4414 static int
4415 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4416 {
4417         struct adapter *adapter = (struct adapter *)arg1;
4418         int            error = 0;
4419         u32            new_wufc;
4420
4421         new_wufc = adapter->wufc;
4422
4423         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4424         if ((error) || (req->newptr == NULL))
4425                 return (error);
4426         if (new_wufc == adapter->wufc)
4427                 return (0);
4428
4429         if (new_wufc & 0xffffff00)
4430                 return (EINVAL);
4431
4432         new_wufc &= 0xff;
4433         new_wufc |= (0xffffff00 & adapter->wufc);
4434         adapter->wufc = new_wufc;
4435
4436         return (0);
4437 } /* ixgbe_sysctl_wufc */
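
/*
 * Usage sketch: the wake-up filter flags occupy the low byte and may be
 * combined.  Assuming the node is dev.ix.<unit>.wufc:
 *
 *   int wufc = 0x2 | 0x10;   // wake on magic packet or broadcast
 *   sysctlbyname("dev.ix.0.wufc", NULL, NULL, &wufc, sizeof(wufc));
 *
 * Any bit above 0xff (e.g. 0x100) is rejected with EINVAL, as checked
 * above.
 */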
4438
4439 #ifdef IXGBE_DEBUG
4440 /************************************************************************
4441  * ixgbe_sysctl_print_rss_config
4442  ************************************************************************/
4443 static int
4444 ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
4445 {
4446         struct adapter  *adapter = (struct adapter *)arg1;
4447         struct ixgbe_hw *hw = &adapter->hw;
4448         device_t        dev = adapter->dev;
4449         struct sbuf     *buf;
4450         int             error = 0, reta_size;
4451         u32             reg;
4452
4453         buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4454         if (!buf) {
4455                 device_printf(dev, "Could not allocate sbuf for output.\n");
4456                 return (ENOMEM);
4457         }
4458
4459         /* TODO: use sbufs to make a string to print out */
4460         /* Set multiplier for RETA setup and table size based on MAC */
4461         switch (adapter->hw.mac.type) {
4462         case ixgbe_mac_X550:
4463         case ixgbe_mac_X550EM_x:
4464         case ixgbe_mac_X550EM_a:
4465                 reta_size = 128;
4466                 break;
4467         default:
4468                 reta_size = 32;
4469                 break;
4470         }
4471
4472         /* Print out the redirection table */
4473         sbuf_cat(buf, "\n");
4474         for (int i = 0; i < reta_size; i++) {
4475                 if (i < 32) {
4476                         reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
4477                         sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
4478                 } else {
4479                         reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
4480                         sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
4481                 }
4482         }
4483
4484         /* TODO: print more config */
4485
4486         error = sbuf_finish(buf);
4487         if (error)
4488                 device_printf(dev, "Error finishing sbuf: %d\n", error);
4489
4490         sbuf_delete(buf);
4491
4492         return (0);
4493 } /* ixgbe_sysctl_print_rss_config */
4494 #endif /* IXGBE_DEBUG */
4495
4496 /************************************************************************
4497  * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
4498  *
4499  *   For X552/X557-AT devices using an external PHY
4500  ************************************************************************/
4501 static int
4502 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4503 {
4504         struct adapter  *adapter = (struct adapter *)arg1;
4505         struct ixgbe_hw *hw = &adapter->hw;
4506         u16             reg;
4507
4508         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4509                 device_printf(adapter->dev,
4510                     "Device has no supported external thermal sensor.\n");
4511                 return (ENODEV);
4512         }
4513
4514         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4515             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4516                 device_printf(adapter->dev,
4517                     "Error reading from PHY's current temperature register\n");
4518                 return (EAGAIN);
4519         }
4520
4521         /* Shift temp for output */
4522         reg = reg >> 8;
4523
4524         return (sysctl_handle_int(oidp, NULL, reg, req));
4525 } /* ixgbe_sysctl_phy_temp */
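
/*
 * Usage sketch: the handler above reports the upper byte of the PHY's
 * current-temperature register and is read-only from userland.  Assuming
 * the node is published as dev.ix.<unit>.phy_temp:
 *
 *   int temp;
 *   size_t len = sizeof(temp);
 *   sysctlbyname("dev.ix.0.phy_temp", &temp, &len, NULL, 0);
 *
 * Devices other than the X552/X557-AT external PHY return ENODEV.
 */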
4526
4527 /************************************************************************
4528  * ixgbe_sysctl_phy_overtemp_occurred
4529  *
4530  *   Reports (directly from the PHY) whether the current PHY
4531  *   temperature is over the overtemp threshold.
4532  ************************************************************************/
4533 static int
4534 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4535 {
4536         struct adapter  *adapter = (struct adapter *)arg1;
4537         struct ixgbe_hw *hw = &adapter->hw;
4538         u16             reg;
4539
4540         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4541                 device_printf(adapter->dev,
4542                     "Device has no supported external thermal sensor.\n");
4543                 return (ENODEV);
4544         }
4545
4546         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4547             IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
4548                 device_printf(adapter->dev,
4549                     "Error reading from PHY's temperature status register\n");
4550                 return (EAGAIN);
4551         }
4552
4553         /* Get occurrence bit */
4554         reg = !!(reg & 0x4000);
4555
4556         return (sysctl_handle_int(oidp, NULL, reg, req));
4557 } /* ixgbe_sysctl_phy_overtemp_occurred */
4558
4559 /************************************************************************
4560  * ixgbe_sysctl_eee_state
4561  *
4562  *   Sysctl to set EEE power saving feature
4563  *   Values:
4564  *     0      - disable EEE
4565  *     1      - enable EEE
4566  *     (none) - get current device EEE state
4567  ************************************************************************/
4568 static int
4569 ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
4570 {
4571         struct adapter *adapter = (struct adapter *)arg1;
4572         device_t       dev = adapter->dev;
4573         int            curr_eee, new_eee, error = 0;
4574         s32            retval;
4575
4576         curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
4577
4578         error = sysctl_handle_int(oidp, &new_eee, 0, req);
4579         if ((error) || (req->newptr == NULL))
4580                 return (error);
4581
4582         /* Nothing to do */
4583         if (new_eee == curr_eee)
4584                 return (0);
4585
4586         /* Not supported */
4587         if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
4588                 return (EINVAL);
4589
4590         /* Bounds checking */
4591         if ((new_eee < 0) || (new_eee > 1))
4592                 return (EINVAL);
4593
4594         retval = adapter->hw.mac.ops.setup_eee(&adapter->hw, new_eee);
4595         if (retval) {
4596                 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
4597                 return (EINVAL);
4598         }
4599
4600         /* Restart auto-neg */
4601         ixgbe_init(adapter);
4602
4603         device_printf(dev, "New EEE state: %d\n", new_eee);
4604
4605         /* Cache new value */
4606         if (new_eee)
4607                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4608         else
4609                 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
4610
4611         return (error);
4612 } /* ixgbe_sysctl_eee_state */
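
/*
 * Usage sketch: assuming the node is dev.ix.<unit>.eee_state, EEE is
 * toggled with a 0/1 write; reading without supplying a new value just
 * returns the current state:
 *
 *   int on = 1;
 *   sysctlbyname("dev.ix.0.eee_state", NULL, NULL, &on, sizeof(on));
 *
 * The write fails with EINVAL if IXGBE_FEATURE_EEE is not in feat_cap,
 * and a successful change re-runs ixgbe_init() to restart
 * auto-negotiation.
 */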
4613
4614 /************************************************************************
4615  * ixgbe_init_device_features
4616  ************************************************************************/
4617 static void
4618 ixgbe_init_device_features(struct adapter *adapter)
4619 {
4620         adapter->feat_cap = IXGBE_FEATURE_NETMAP
4621                           | IXGBE_FEATURE_RSS
4622                           | IXGBE_FEATURE_MSI
4623                           | IXGBE_FEATURE_MSIX
4624                           | IXGBE_FEATURE_LEGACY_IRQ
4625                           | IXGBE_FEATURE_LEGACY_TX;
4626
4627         /* Set capabilities first... */
4628         switch (adapter->hw.mac.type) {
4629         case ixgbe_mac_82598EB:
4630                 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
4631                         adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
4632                 break;
4633         case ixgbe_mac_X540:
4634                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4635                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4636                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
4637                     (adapter->hw.bus.func == 0))
4638                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4639                 break;
4640         case ixgbe_mac_X550:
4641                 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4642                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4643                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4644                 break;
4645         case ixgbe_mac_X550EM_x:
4646                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4647                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4648                 if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
4649                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4650                 break;
4651         case ixgbe_mac_X550EM_a:
4652                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4653                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4654                 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4655                 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
4656                     (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
4657                         adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
4658                         adapter->feat_cap |= IXGBE_FEATURE_EEE;
4659                 }
4660                 break;
4661         case ixgbe_mac_82599EB:
4662                 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
4663                 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
4664                 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
4665                     (adapter->hw.bus.func == 0))
4666                         adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
4667                 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
4668                         adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
4669                 break;
4670         default:
4671                 break;
4672         }
4673
4674         /* Enabled by default... */
4675         /* Fan failure detection */
4676         if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
4677                 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
4678         /* Netmap */
4679         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
4680                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
4681         /* EEE */
4682         if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4683                 adapter->feat_en |= IXGBE_FEATURE_EEE;
4684         /* Thermal Sensor */
4685         if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
4686                 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
4687
4688         /* Enabled via global sysctl... */
4689         /* Flow Director */
4690         if (ixgbe_enable_fdir) {
4691                 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
4692                         adapter->feat_en |= IXGBE_FEATURE_FDIR;
4693                 else
4694                         device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
4695         }
4696         /* Legacy (single queue) transmit */
4697         if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
4698             ixgbe_enable_legacy_tx)
4699                 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
4700         /*
4701          * Message Signaled Interrupts - Extended (MSI-X)
4702          * Normal MSI is only enabled if MSI-X calls fail.
4703          */
4704         if (!ixgbe_enable_msix)
4705                 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
4706         /* Receive-Side Scaling (RSS) */
4707         if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
4708                 adapter->feat_en |= IXGBE_FEATURE_RSS;
4709
4710         /* Disable features with unmet dependencies... */
4711         /* No MSI-X */
4712         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
4713                 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
4714                 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
4715                 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
4716                 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
4717         }
4718 } /* ixgbe_init_device_features */
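
/*
 * The routine above maintains two bitmasks: feat_cap lists what the
 * hardware could support, feat_en the subset actually enabled (by
 * default or via the global tunables).  Run-time code therefore tests
 * feat_en, e.g.:
 *
 *   if (adapter->feat_en & IXGBE_FEATURE_FDIR)
 *           mask |= IXGBE_EIMS_FLOW_DIR;   // as in ixgbe_enable_intr()
 *
 * while dependency handling (such as dropping RSS/SRIOV when MSI-X is
 * unavailable) clears feat_cap first and then feat_en to match.
 */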
4719
4720 /************************************************************************
4721  * ixgbe_probe - Device identification routine
4722  *
4723  *   Determines if the driver should be loaded on the
4724  *   adapter based on its PCI vendor/device ID.
4725  *
4726  *   return BUS_PROBE_DEFAULT on success, positive on failure
4727  ************************************************************************/
4728 static int
4729 ixgbe_probe(device_t dev)
4730 {
4731         ixgbe_vendor_info_t *ent;
4732
4733         u16  pci_vendor_id = 0;
4734         u16  pci_device_id = 0;
4735         u16  pci_subvendor_id = 0;
4736         u16  pci_subdevice_id = 0;
4737         char adapter_name[256];
4738
4739         INIT_DEBUGOUT("ixgbe_probe: begin");
4740
4741         pci_vendor_id = pci_get_vendor(dev);
4742         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
4743                 return (ENXIO);
4744
4745         pci_device_id = pci_get_device(dev);
4746         pci_subvendor_id = pci_get_subvendor(dev);
4747         pci_subdevice_id = pci_get_subdevice(dev);
4748
4749         ent = ixgbe_vendor_info_array;
4750         while (ent->vendor_id != 0) {
4751                 if ((pci_vendor_id == ent->vendor_id) &&
4752                     (pci_device_id == ent->device_id) &&
4753                     ((pci_subvendor_id == ent->subvendor_id) ||
4754                      (ent->subvendor_id == 0)) &&
4755                     ((pci_subdevice_id == ent->subdevice_id) ||
4756                      (ent->subdevice_id == 0))) {
4757                         sprintf(adapter_name, "%s, Version - %s",
4758                                 ixgbe_strings[ent->index],
4759                                 ixgbe_driver_version);
4760                         device_set_desc_copy(dev, adapter_name);
4761                         ++ixgbe_total_ports;
4762                         return (BUS_PROBE_DEFAULT);
4763                 }
4764                 ent++;
4765         }
4766
4767         return (ENXIO);
4768 } /* ixgbe_probe */
4769
4770
4771 /************************************************************************
4772  * ixgbe_ioctl - Ioctl entry point
4773  *
4774  *   Called when the user wants to configure the interface.
4775  *
4776  *   return 0 on success, positive on failure
4777  ************************************************************************/
4778 static int
4779 ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4780 {
4781         struct adapter *adapter = ifp->if_softc;
4782         struct ifreq   *ifr = (struct ifreq *) data;
4783 #if defined(INET) || defined(INET6)
4784         struct ifaddr  *ifa = (struct ifaddr *)data;
4785 #endif
4786         int            error = 0;
4787         bool           avoid_reset = FALSE;
4788
4789         switch (command) {
4790         case SIOCSIFADDR:
4791 #ifdef INET
4792                 if (ifa->ifa_addr->sa_family == AF_INET)
4793                         avoid_reset = TRUE;
4794 #endif
4795 #ifdef INET6
4796                 if (ifa->ifa_addr->sa_family == AF_INET6)
4797                         avoid_reset = TRUE;
4798 #endif
4799                 /*
4800                  * Calling init results in link renegotiation,
4801                  * so we avoid doing it when possible.
4802                  */
4803                 if (avoid_reset) {
4804                         ifp->if_flags |= IFF_UP;
4805                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
4806                                 ixgbe_init(adapter);
4807 #ifdef INET
4808                         if (!(ifp->if_flags & IFF_NOARP))
4809                                 arp_ifinit(ifp, ifa);
4810 #endif
4811                 } else
4812                         error = ether_ioctl(ifp, command, data);
4813                 break;
4814         case SIOCSIFMTU:
4815                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
4816                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
4817                         error = EINVAL;
4818                 } else {
4819                         IXGBE_CORE_LOCK(adapter);
4820                         ifp->if_mtu = ifr->ifr_mtu;
4821                         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
4822                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4823                                 ixgbe_init_locked(adapter);
4824                         ixgbe_recalculate_max_frame(adapter);
4825                         IXGBE_CORE_UNLOCK(adapter);
4826                 }
4827                 break;
4828         case SIOCSIFFLAGS:
4829                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
4830                 IXGBE_CORE_LOCK(adapter);
4831                 if (ifp->if_flags & IFF_UP) {
4832                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4833                                 if ((ifp->if_flags ^ adapter->if_flags) &
4834                                     (IFF_PROMISC | IFF_ALLMULTI)) {
4835                                         ixgbe_set_promisc(adapter);
4836                                 }
4837                         } else
4838                                 ixgbe_init_locked(adapter);
4839                 } else
4840                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4841                                 ixgbe_stop(adapter);
4842                 adapter->if_flags = ifp->if_flags;
4843                 IXGBE_CORE_UNLOCK(adapter);
4844                 break;
4845         case SIOCADDMULTI:
4846         case SIOCDELMULTI:
4847                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
4848                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4849                         IXGBE_CORE_LOCK(adapter);
4850                         ixgbe_disable_intr(adapter, false);
4851                         ixgbe_set_multi(adapter);
4852                         ixgbe_enable_intr(adapter, false);
4853                         IXGBE_CORE_UNLOCK(adapter);
4854                 }
4855                 break;
4856         case SIOCSIFMEDIA:
4857         case SIOCGIFMEDIA:
4858                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
4859                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
4860                 break;
4861         case SIOCSIFCAP:
4862         {
4863                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
4864
4865                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4866
4867                 if (!mask)
4868                         break;
4869
4870                 /* HW cannot turn these on/off separately */
4871                 if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
4872                         ifp->if_capenable ^= IFCAP_RXCSUM;
4873                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
4874                 }
4875                 if (mask & IFCAP_TXCSUM)
4876                         ifp->if_capenable ^= IFCAP_TXCSUM;
4877                 if (mask & IFCAP_TXCSUM_IPV6)
4878                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
4879                 if (mask & IFCAP_TSO4)
4880                         ifp->if_capenable ^= IFCAP_TSO4;
4881                 if (mask & IFCAP_TSO6)
4882                         ifp->if_capenable ^= IFCAP_TSO6;
4883                 if (mask & IFCAP_LRO)
4884                         ifp->if_capenable ^= IFCAP_LRO;
4885                 if (mask & IFCAP_VLAN_HWTAGGING)
4886                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
4887                 if (mask & IFCAP_VLAN_HWFILTER)
4888                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
4889                 if (mask & IFCAP_VLAN_HWTSO)
4890                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4891
4892                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4893                         IXGBE_CORE_LOCK(adapter);
4894                         ixgbe_init_locked(adapter);
4895                         IXGBE_CORE_UNLOCK(adapter);
4896                 }
4897                 VLAN_CAPABILITIES(ifp);
4898                 break;
4899         }
4900 #if __FreeBSD_version >= 1100036
4901         case SIOCGI2C:
4902         {
4903                 struct ixgbe_hw *hw = &adapter->hw;
4904                 struct ifi2creq i2c;
4905                 int i;
4906
4907                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
4908                 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
4909                 if (error != 0)
4910                         break;
4911                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
4912                         error = EINVAL;
4913                         break;
4914                 }
4915                 if (i2c.len > sizeof(i2c.data)) {
4916                         error = EINVAL;
4917                         break;
4918                 }
4919
4920                 for (i = 0; i < i2c.len; i++)
4921                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
4922                             i2c.dev_addr, &i2c.data[i]);
4923                 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
4924                 break;
4925         }
4926 #endif
4927         default:
4928                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
4929                 error = ether_ioctl(ifp, command, data);
4930                 break;
4931         }
4932
4933         return (error);
4934 } /* ixgbe_ioctl */
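
/*
 * Usage sketch for the SIOCGI2C branch above (illustrative; struct field
 * names follow the ifi2creq usage in this handler): userland can read
 * SFP/SFP+ module EEPROM bytes through the ioctl, much as ifconfig(8)
 * does:
 *
 *   #include <sys/ioctl.h>
 *   #include <sys/socket.h>
 *   #include <net/if.h>
 *   #include <string.h>
 *
 *   struct ifreq ifr;
 *   struct ifi2creq i2c;
 *
 *   memset(&ifr, 0, sizeof(ifr));
 *   memset(&i2c, 0, sizeof(i2c));
 *   strlcpy(ifr.ifr_name, "ix0", sizeof(ifr.ifr_name));
 *   i2c.dev_addr = 0xA0;        // only 0xA0 / 0xA2 are accepted above
 *   i2c.offset = 0;
 *   i2c.len = 8;                // must not exceed sizeof(i2c.data)
 *   ifr.ifr_data = (caddr_t)&i2c;
 *   ioctl(s, SIOCGI2C, &ifr);   // s is any open AF_INET socket fd
 */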
4935
4936 /************************************************************************
4937  * ixgbe_check_fan_failure
4938  ************************************************************************/
4939 static void
4940 ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
4941 {
4942         u32 mask;
4943
4944         mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
4945             IXGBE_ESDP_SDP1;
4946
4947         if (reg & mask)
4948                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
4949 } /* ixgbe_check_fan_failure */
4950
4951 /************************************************************************
4952  * ixgbe_handle_que
4953  ************************************************************************/
4954 static void
4955 ixgbe_handle_que(void *context, int pending)
4956 {
4957         struct ix_queue *que = context;
4958         struct adapter  *adapter = que->adapter;
4959         struct tx_ring  *txr = que->txr;
4960         struct ifnet    *ifp = adapter->ifp;
4961
4962         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4963                 ixgbe_rxeof(que);
4964                 IXGBE_TX_LOCK(txr);
4965                 ixgbe_txeof(txr);
4966                 if (!ixgbe_ring_empty(ifp, txr->br))
4967                         ixgbe_start_locked(ifp, txr);
4968                 IXGBE_TX_UNLOCK(txr);
4969         }
4970
4971         /* Re-enable this interrupt */
4972         if (que->res != NULL)
4973                 ixgbe_enable_queue(adapter, que->msix);
4974         else
4975                 ixgbe_enable_intr(adapter, false);
4976
4977         return;
4978 } /* ixgbe_handle_que */
4979
4980
4981
4982 /************************************************************************
4983  * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
4984  ************************************************************************/
4985 static int
4986 ixgbe_allocate_legacy(struct adapter *adapter)
4987 {
4988         device_t        dev = adapter->dev;
4989         struct ix_queue *que = adapter->queues;
4990         struct tx_ring  *txr = adapter->tx_rings;
4991         int             error;
4992
4993         /* We allocate a single interrupt resource */
4994         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
4995             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
4996         if (adapter->res == NULL) {
4997                 device_printf(dev,
4998                     "Unable to allocate bus resource: interrupt\n");
4999                 return (ENXIO);
5000         }
5001
5002         /*
5003          * Try allocating a fast interrupt and the associated deferred
5004          * processing contexts.
5005          */
5006         if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5007                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
5008         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5009         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5010             taskqueue_thread_enqueue, &que->tq);
5011         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
5012             device_get_nameunit(adapter->dev));
5013
5014         if ((error = bus_setup_intr(dev, adapter->res,
5015             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, que,
5016             &adapter->tag)) != 0) {
5017                 device_printf(dev,
5018                     "Failed to register fast interrupt handler: %d\n", error);
5019                 taskqueue_free(que->tq);
5020                 que->tq = NULL;
5021
5022                 return (error);
5023         }
5024         /* For simplicity in the handlers */
5025         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
5026
5027         return (0);
5028 } /* ixgbe_allocate_legacy */
5029
5030
5031 /************************************************************************
5032  * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
5033  ************************************************************************/
5034 static int
5035 ixgbe_allocate_msix(struct adapter *adapter)
5036 {
5037         device_t        dev = adapter->dev;
5038         struct ix_queue *que = adapter->queues;
5039         struct tx_ring  *txr = adapter->tx_rings;
5040         int             error, rid, vector = 0;
5041         int             cpu_id = 0;
5042         unsigned int    rss_buckets = 0;
5043         cpuset_t        cpu_mask;
5044
5045         /*
5046          * If we're doing RSS, the number of queues needs to
5047          * match the number of RSS buckets that are configured.
5048          *
5049          * + If there's more queues than RSS buckets, we'll end
5050          *   up with queues that get no traffic.
5051          *
5052          * + If there's more RSS buckets than queues, we'll end
5053          *   up having multiple RSS buckets map to the same queue,
5054          *   so there'll be some contention.
5055          */
5056         rss_buckets = rss_getnumbuckets();
5057         if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
5058             (adapter->num_queues != rss_buckets)) {
5059                 device_printf(dev, "%s: number of queues (%d) != number of RSS buckets (%d); performance will be impacted.\n",
5060                     __func__, adapter->num_queues, rss_buckets);
5061         }
5062
5063         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
5064                 rid = vector + 1;
5065                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
5066                     RF_SHAREABLE | RF_ACTIVE);
5067                 if (que->res == NULL) {
5068                         device_printf(dev, "Unable to allocate bus resource: que interrupt [%d]\n",
5069                             vector);
5070                         return (ENXIO);
5071                 }
5072                 /* Set the handler function */
5073                 error = bus_setup_intr(dev, que->res,
5074                     INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_que, que,
5075                     &que->tag);
5076                 if (error) {
5077                         que->res = NULL;
5078                         device_printf(dev, "Failed to register QUE handler");
5079                         return (error);
5080                 }
5081 #if __FreeBSD_version >= 800504
5082                 bus_describe_intr(dev, que->res, que->tag, "q%d", i);
5083 #endif
5084                 que->msix = vector;
5085                 adapter->active_queues |= (u64)(1 << que->msix);
5086
5087                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
5088                         /*
5089                          * The queue ID is used as the RSS layer bucket ID.
5090                          * We look up the queue ID -> RSS CPU ID and select
5091                          * that.
5092                          */
5093                         cpu_id = rss_getcpu(i % rss_buckets);
5094                         CPU_SETOF(cpu_id, &cpu_mask);
5095                 } else {
5096                         /*
5097                          * Bind the MSI-X vector, and thus the
5098                          * rings to the corresponding CPU.
5099                          *
5100                          * This just happens to match the default RSS
5101                          * round-robin bucket -> queue -> CPU allocation.
5102                          */
5103                         if (adapter->num_queues > 1)
5104                                 cpu_id = i;
5105                 }
5106                 if (adapter->num_queues > 1)
5107                         bus_bind_intr(dev, que->res, cpu_id);
5108 #ifdef IXGBE_DEBUG
5109                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5110                         device_printf(dev, "Bound RSS bucket %d to CPU %d\n", i,
5111                             cpu_id);
5112                 else
5113                         device_printf(dev, "Bound queue %d to cpu %d\n", i,
5114                             cpu_id);
5115 #endif /* IXGBE_DEBUG */
5116
5117
5118                 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
5119                         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start,
5120                             txr);
5121                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
5122                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
5123                     taskqueue_thread_enqueue, &que->tq);
5124 #if __FreeBSD_version < 1100000
5125                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s:q%d",
5126                     device_get_nameunit(adapter->dev), i);
5127 #else
5128                 if (adapter->feat_en & IXGBE_FEATURE_RSS)
5129                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5130                             &cpu_mask, "%s (bucket %d)",
5131                             device_get_nameunit(adapter->dev), cpu_id);
5132                 else
5133                         taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
5134                             NULL, "%s:q%d", device_get_nameunit(adapter->dev),
5135                             i);
5136 #endif
5137         }
5138
5139         /* and Link */
5140         adapter->link_rid = vector + 1;
5141         adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
5142             &adapter->link_rid, RF_SHAREABLE | RF_ACTIVE);
5143         if (!adapter->res) {
5144                 device_printf(dev,
5145                     "Unable to allocate bus resource: Link interrupt [%d]\n",
5146                     adapter->link_rid);
5147                 return (ENXIO);
5148         }
5149         /* Set the link handler function */
5150         error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE,
5151             NULL, ixgbe_msix_link, adapter, &adapter->tag);
5152         if (error) {
5153                 adapter->res = NULL;
5154                 device_printf(dev, "Failed to register LINK handler");
5155                 return (error);
5156         }
5157 #if __FreeBSD_version >= 800504
5158         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
5159 #endif
5160         adapter->vector = vector;
5161         return (0);
5162 } /* ixgbe_allocate_msix */
5163
5164 /************************************************************************
5165  * ixgbe_configure_interrupts
5166  *
5167  *   Setup MSI-X, MSI, or legacy interrupts (in that order).
5168  *   This will also depend on user settings.
5169  ************************************************************************/
5170 static int
5171 ixgbe_configure_interrupts(struct adapter *adapter)
5172 {
5173         device_t dev = adapter->dev;
5174         int      rid, want, queues, msgs;
5175
5176         /* Default to 1 queue if MSI-X setup fails */
5177         adapter->num_queues = 1;
5178
5179         /* Override by tunable */
5180         if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX))
5181                 goto msi;
5182
5183         /* First try MSI-X */
5184         msgs = pci_msix_count(dev);
5185         if (msgs == 0)
5186                 goto msi;
5187         rid = PCIR_BAR(MSIX_82598_BAR);
5188         adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5189             RF_ACTIVE);
5190         if (adapter->msix_mem == NULL) {
5191                 rid += 4;  /* 82599 maps in higher BAR */
5192                 adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
5193                     &rid, RF_ACTIVE);
5194         }
5195         if (adapter->msix_mem == NULL) {
5196                 /* May not be enabled */
5197                 device_printf(adapter->dev, "Unable to map MSI-X table.\n");
5198                 goto msi;
5199         }
5200
5201         /* Figure out a reasonable auto config value */
5202         queues = min(mp_ncpus, msgs - 1);
5203         /* If we're doing RSS, clamp at the number of RSS buckets */
5204         if (adapter->feat_en & IXGBE_FEATURE_RSS)
5205                 queues = min(queues, rss_getnumbuckets());
5206         if (ixgbe_num_queues > queues) {
5207                 device_printf(adapter->dev, "ixgbe_num_queues (%d) is too large, using reduced amount (%d).\n", ixgbe_num_queues, queues);
5208                 ixgbe_num_queues = queues;
5209         }
5210
5211         if (ixgbe_num_queues != 0)
5212                 queues = ixgbe_num_queues;
5213         else
5214                 /* Cap at 8 queues when autoconfiguring */
5215                 queues = min(queues, 8);
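             /*
              * Illustrative example (tunable name assumed from the driver's
              * hw.ix sysctl tree): setting hw.ix.num_queues="4" in
              * loader.conf makes ixgbe_num_queues nonzero, so exactly 4
              * queues are requested and the autoconfigured cap is bypassed.
              */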
5216
5217         /* Reflect the queue count actually used back into the sysctl */
5218         ixgbe_num_queues = queues;
5219
5220         /*
5221          * Want one vector (RX/TX pair) per queue
5222          * plus an additional for Link.
5223          */
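             /*
              * Worked example: on a 16-CPU system advertising 64 MSI-X
              * messages (and no tighter RSS clamp), autoconfiguration picks
              * min(16, 63) = 16 queues, caps that at 8, and therefore
              * requests 8 + 1 = 9 vectors below.
              */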
5224         want = queues + 1;
5225         if (msgs >= want)
5226                 msgs = want;
5227         else {
5228                 device_printf(adapter->dev, "MSI-X configuration problem: %d vectors available but %d needed!\n",
5229                     msgs, want);
5230                 goto msi;
5231         }
5232         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
5233                 device_printf(adapter->dev,
5234                     "Using MSI-X interrupts with %d vectors\n", msgs);
5235                 adapter->num_queues = queues;
5236                 adapter->feat_en |= IXGBE_FEATURE_MSIX;
5237                 return (0);
5238         }
5239         /*
5240          * MSI-X allocation failed or provided us with
5241          * fewer vectors than needed. Free MSI-X resources
5242          * and fall back to MSI.
5243          */
5244         pci_release_msi(dev);
5245
5246 msi:
5247         /* Without MSI-X, some features are no longer supported */
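             /* Both RSS and SR-IOV depend on dedicated per-queue/per-VF MSI-X vectors. */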
5248         adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
5249         adapter->feat_en  &= ~IXGBE_FEATURE_RSS;
5250         adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
5251         adapter->feat_en  &= ~IXGBE_FEATURE_SRIOV;
5252
5253         if (adapter->msix_mem != NULL) {
5254                 bus_release_resource(dev, SYS_RES_MEMORY, rid,
5255                     adapter->msix_mem);
5256                 adapter->msix_mem = NULL;
5257         }
5258         msgs = 1;
5259         if (pci_alloc_msi(dev, &msgs) == 0) {
5260                 adapter->feat_en |= IXGBE_FEATURE_MSI;
5261                 adapter->link_rid = 1;
5262                 device_printf(adapter->dev, "Using an MSI interrupt\n");
5263                 return (0);
5264         }
5265
5266         if (!(adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
5267                 device_printf(adapter->dev,
5268                     "Device does not support legacy interrupts.\n");
5269                 return (1);
5270         }
5271
5272         adapter->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
5273         adapter->link_rid = 0;
5274         device_printf(adapter->dev, "Using a Legacy interrupt\n");
5275
5276         return (0);
5277 } /* ixgbe_configure_interrupts */
5278
5279
5280 /************************************************************************
5281  * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
5282  *
5283  *   Done outside of interrupt context since the driver might sleep
5284  ************************************************************************/
5285 static void
5286 ixgbe_handle_link(void *context)
5287 {
5288         struct adapter  *adapter = context;
5289         struct ixgbe_hw *hw = &adapter->hw;
5290
5291         ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
5292
5293         /* Re-enable link interrupts */
5294         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
5295 } /* ixgbe_handle_link */
5296
5297 /************************************************************************
5298  * ixgbe_rearm_queues
5299  ************************************************************************/
5300 static void
5301 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
5302 {
5303         u32 mask;
5304
5305         switch (adapter->hw.mac.type) {
5306         case ixgbe_mac_82598EB:
5307                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
5308                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
5309                 break;
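             /*
              * Newer MACs expose up to 64 queue interrupt bits, split across
              * two extended cause-set registers: the low 32 bits go to
              * EICS_EX(0) and the high 32 bits to EICS_EX(1).
              */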
5310         case ixgbe_mac_82599EB:
5311         case ixgbe_mac_X540:
5312         case ixgbe_mac_X550:
5313         case ixgbe_mac_X550EM_x:
5314         case ixgbe_mac_X550EM_a:
5315                 mask = (queues & 0xFFFFFFFF);
5316                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
5317                 mask = (queues >> 32);
5318                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
5319                 break;
5320         default:
5321                 break;
5322         }
5323 } /* ixgbe_rearm_queues */
5324