1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /*********************************************************************
44  *  Set this to one to display debug statistics
45  *********************************************************************/
46 int             ixgbe_display_debug_stats = 0;
47
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixgbe_driver_version[] = "2.8.3";
52
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixgbe_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
64 {
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95         /* required last entry */
96         {0, 0, 0, 0, 0}
97 };
98
99 /*********************************************************************
100  *  Table of branding strings
101  *********************************************************************/
102
103 static char    *ixgbe_strings[] = {
104         "Intel(R) PRO/10GbE PCI-Express Network Driver"
105 };
106
107 /*********************************************************************
108  *  Function prototypes
109  *********************************************************************/
110 static int      ixgbe_probe(device_t);
111 static int      ixgbe_attach(device_t);
112 static int      ixgbe_detach(device_t);
113 static int      ixgbe_shutdown(device_t);
114 static int      ixgbe_suspend(device_t);
115 static int      ixgbe_resume(device_t);
116 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void     ixgbe_init(void *);
118 static void     ixgbe_init_locked(struct adapter *);
119 static void     ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
122 #endif
123 static void     ixgbe_add_media_types(struct adapter *);
124 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int      ixgbe_media_change(struct ifnet *);
126 static void     ixgbe_identify_hardware(struct adapter *);
127 static int      ixgbe_allocate_pci_resources(struct adapter *);
128 static void     ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int      ixgbe_allocate_msix(struct adapter *);
130 static int      ixgbe_allocate_legacy(struct adapter *);
131 static int      ixgbe_setup_msix(struct adapter *);
132 static void     ixgbe_free_pci_resources(struct adapter *);
133 static void     ixgbe_local_timer(void *);
134 static int      ixgbe_setup_interface(device_t, struct adapter *);
135 static void     ixgbe_config_dmac(struct adapter *);
136 static void     ixgbe_config_delay_values(struct adapter *);
137 static void     ixgbe_config_link(struct adapter *);
138 static void     ixgbe_check_eee_support(struct adapter *);
139 static void     ixgbe_check_wol_support(struct adapter *);
140 static int      ixgbe_setup_low_power_mode(struct adapter *);
141 static void     ixgbe_rearm_queues(struct adapter *, u64);
142
143 static void     ixgbe_initialize_transmit_units(struct adapter *);
144 static void     ixgbe_initialize_receive_units(struct adapter *);
145 static void     ixgbe_enable_rx_drop(struct adapter *);
146 static void     ixgbe_disable_rx_drop(struct adapter *);
147
148 static void     ixgbe_enable_intr(struct adapter *);
149 static void     ixgbe_disable_intr(struct adapter *);
150 static void     ixgbe_update_stats_counters(struct adapter *);
151 static void     ixgbe_set_promisc(struct adapter *);
152 static void     ixgbe_set_multi(struct adapter *);
153 static void     ixgbe_update_link_status(struct adapter *);
154 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void     ixgbe_configure_ivars(struct adapter *);
156 static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
157
158 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
161
162 static void     ixgbe_add_device_sysctls(struct adapter *);
163 static void     ixgbe_add_hw_stats(struct adapter *);
164
165 /* Sysctl handlers */
166 static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int      ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int      ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int      ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int      ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int      ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
178
179 /* Support for pluggable optic modules */
180 static bool     ixgbe_sfp_probe(struct adapter *);
181 static void     ixgbe_setup_optics(struct adapter *);
182
183 /* Legacy (single vector) interrupt handler */
184 static void     ixgbe_legacy_irq(void *);
185
186 /* The MSI/X Interrupt handlers */
187 static void     ixgbe_msix_que(void *);
188 static void     ixgbe_msix_link(void *);
189
190 /* Deferred interrupt tasklets */
191 static void     ixgbe_handle_que(void *, int);
192 static void     ixgbe_handle_link(void *, int);
193 static void     ixgbe_handle_msf(void *, int);
194 static void     ixgbe_handle_mod(void *, int);
195 static void     ixgbe_handle_phy(void *, int);
196
197 #ifdef IXGBE_FDIR
198 static void     ixgbe_reinit_fdir(void *, int);
199 #endif
200
201 /*********************************************************************
202  *  FreeBSD Device Interface Entry Points
203  *********************************************************************/
204
205 static device_method_t ix_methods[] = {
206         /* Device interface */
207         DEVMETHOD(device_probe, ixgbe_probe),
208         DEVMETHOD(device_attach, ixgbe_attach),
209         DEVMETHOD(device_detach, ixgbe_detach),
210         DEVMETHOD(device_shutdown, ixgbe_shutdown),
211         DEVMETHOD(device_suspend, ixgbe_suspend),
212         DEVMETHOD(device_resume, ixgbe_resume),
213         DEVMETHOD_END
214 };
215
216 static driver_t ix_driver = {
217         "ix", ix_methods, sizeof(struct adapter),
218 };
219
220 devclass_t ix_devclass;
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
222
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
225
226 /*
227 ** TUNEABLE PARAMETERS:
228 */
229
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231                    "IXGBE driver parameters");
232
233 /*
234 ** AIM: Adaptive Interrupt Moderation,
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic seen on that interrupt vector.
238 */
239 static int ixgbe_enable_aim = TRUE;
240 TUNABLE_INT("hw.ix.enable_aim", &ixgbe_enable_aim);
241 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
242     "Enable adaptive interrupt moderation");
243
244 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
245 TUNABLE_INT("hw.ix.max_interrupt_rate", &ixgbe_max_interrupt_rate);
246 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
247     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
248
249 /* How many packets rxeof tries to clean at a time */
250 static int ixgbe_rx_process_limit = 256;
251 TUNABLE_INT("hw.ix.rx_process_limit", &ixgbe_rx_process_limit);
252 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
253     &ixgbe_rx_process_limit, 0,
254     "Maximum number of received packets to process at a time, "
255     "-1 means unlimited");
256
257 /* How many packets txeof tries to clean at a time */
258 static int ixgbe_tx_process_limit = 256;
259 TUNABLE_INT("hw.ix.tx_process_limit", &ixgbe_tx_process_limit);
260 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
261     &ixgbe_tx_process_limit, 0,
262     "Maximum number of sent packets to process at a time, "
263     "-1 means unlimited");
264
265 /*
266 ** Smart speed setting, default to on.
267 ** This only works as a compile-time option
268 ** right now, since it is applied during attach;
269 ** set this to 'ixgbe_smart_speed_off' to
270 ** disable it.
271 */
272 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
273
274 /*
275  * MSIX should be the default for best performance,
276  * but this allows it to be forced off for testing.
277  */
278 static int ixgbe_enable_msix = 1;
279 TUNABLE_INT("hw.ix.enable_msix", &ixgbe_enable_msix);
280 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
281     "Enable MSI-X interrupts");
282
283 /*
284  * Number of queues: if set to 0,
285  * it autoconfigures based on the
286  * number of CPUs, with a maximum of 8. It
287  * can be overridden manually here.
288  */
289 static int ixgbe_num_queues = 0;
290 TUNABLE_INT("hw.ix.num_queues", &ixgbe_num_queues);
291 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
292     "Number of queues to configure up to a maximum of 8; "
293     "0 indicates autoconfigure");
294
295 /*
296 ** Number of TX descriptors per ring,
297 ** set higher than RX as this seems
298 ** the better-performing choice.
299 */
300 static int ixgbe_txd = PERFORM_TXD;
301 TUNABLE_INT("hw.ix.txd", &ixgbe_txd);
302 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
303     "Number of transmit descriptors per queue");
304
305 /* Number of RX descriptors per ring */
306 static int ixgbe_rxd = PERFORM_RXD;
307 TUNABLE_INT("hw.ix.rxd", &ixgbe_rxd);
308 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
309     "Number of receive descriptors per queue");
310
311 /*
312 ** Setting this to on will allow the use
313 ** of unsupported SFP+ modules; note that
314 ** in doing so you are on your own :)
315 */
316 static int allow_unsupported_sfp = FALSE;
317 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
318
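/*
** Usage note: the hw.ix.* knobs above are loader tunables, so they
** can be set from /boot/loader.conf before the module is loaded,
** for example (values shown are illustrative only):
**   hw.ix.num_queues=4
**   hw.ix.enable_aim=0
**   hw.ix.unsupported_sfp=1
** Tunables created with CTLFLAG_RWTUN (e.g. enable_aim) may also be
** changed at runtime with sysctl(8).
*/
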
319 /* Keep a running tab on the number of ports as a sanity check */
320 static int ixgbe_total_ports;
321
322 #ifdef IXGBE_FDIR
323 /* 
324 ** Flow Director actually 'steals'
325 ** part of the packet buffer as its
326 ** filter pool; this variable controls
327 ** how much it uses:
328 **  0 = 64K, 1 = 128K, 2 = 256K
329 */
330 static int fdir_pballoc = 1;
331 #endif
332
333 #ifdef DEV_NETMAP
334 /*
335  * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
336  * be a reference on how to implement netmap support in a driver.
337  * Additional comments are in ixgbe_netmap.h .
338  *
339  * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
340  * that extend the standard driver.
341  */
342 #include <dev/netmap/ixgbe_netmap.h>
343 #endif /* DEV_NETMAP */
344
345 /*********************************************************************
346  *  Device identification routine
347  *
348  *  ixgbe_probe determines if the driver should be loaded on
349  *  adapter based on PCI vendor/device id of the adapter.
350  *
351  *  return BUS_PROBE_DEFAULT on success, positive on failure
352  *********************************************************************/
353
354 static int
355 ixgbe_probe(device_t dev)
356 {
357         ixgbe_vendor_info_t *ent;
358
359         u16     pci_vendor_id = 0;
360         u16     pci_device_id = 0;
361         u16     pci_subvendor_id = 0;
362         u16     pci_subdevice_id = 0;
363         char    adapter_name[256];
364
365         INIT_DEBUGOUT("ixgbe_probe: begin");
366
367         pci_vendor_id = pci_get_vendor(dev);
368         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
369                 return (ENXIO);
370
371         pci_device_id = pci_get_device(dev);
372         pci_subvendor_id = pci_get_subvendor(dev);
373         pci_subdevice_id = pci_get_subdevice(dev);
374
375         ent = ixgbe_vendor_info_array;
376         while (ent->vendor_id != 0) {
377                 if ((pci_vendor_id == ent->vendor_id) &&
378                     (pci_device_id == ent->device_id) &&
379
380                     ((pci_subvendor_id == ent->subvendor_id) ||
381                      (ent->subvendor_id == 0)) &&
382
383                     ((pci_subdevice_id == ent->subdevice_id) ||
384                      (ent->subdevice_id == 0))) {
385                         sprintf(adapter_name, "%s, Version - %s",
386                                 ixgbe_strings[ent->index],
387                                 ixgbe_driver_version);
388                         device_set_desc_copy(dev, adapter_name);
389                         ++ixgbe_total_ports;
390                         return (BUS_PROBE_DEFAULT);
391                 }
392                 ent++;
393         }
394         return (ENXIO);
395 }
396
397 /*********************************************************************
398  *  Device initialization routine
399  *
400  *  The attach entry point is called when the driver is being loaded.
401  *  This routine identifies the type of hardware, allocates all resources
402  *  and initializes the hardware.
403  *
404  *  return 0 on success, positive on failure
405  *********************************************************************/
406
407 static int
408 ixgbe_attach(device_t dev)
409 {
410         struct adapter *adapter;
411         struct ixgbe_hw *hw;
412         int             error = 0;
413         u16             csum;
414         u32             ctrl_ext;
415
416         INIT_DEBUGOUT("ixgbe_attach: begin");
417
418         /* Allocate, clear, and link in our adapter structure */
419         adapter = device_get_softc(dev);
420         adapter->dev = adapter->osdep.dev = dev;
421         hw = &adapter->hw;
422
423         /* Core Lock Init */
424         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
425
426         /* Set up the timer callout */
427         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
428
429         /* Determine hardware revision */
430         ixgbe_identify_hardware(adapter);
431
432         /* Do base PCI setup - map BAR0 */
433         if (ixgbe_allocate_pci_resources(adapter)) {
434                 device_printf(dev, "Allocation of PCI resources failed\n");
435                 error = ENXIO;
436                 goto err_out;
437         }
438
439         /* Do descriptor calc and sanity checks */
440         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
441             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
442                 device_printf(dev, "TXD config issue, using default!\n");
443                 adapter->num_tx_desc = DEFAULT_TXD;
444         } else
445                 adapter->num_tx_desc = ixgbe_txd;
446
447         /*
448         ** With many RX rings it is easy to exceed the
449         ** system mbuf allocation. Tuning nmbclusters
450         ** can alleviate this.
451         */
452         if (nmbclusters > 0) {
453                 int s;
454                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
455                 if (s > nmbclusters) {
456                         device_printf(dev, "RX Descriptors exceed "
457                             "system mbuf max, using default instead!\n");
458                         ixgbe_rxd = DEFAULT_RXD;
459                 }
460         }
461
462         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
463             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
464                 device_printf(dev, "RXD config issue, using default!\n");
465                 adapter->num_rx_desc = DEFAULT_RXD;
466         } else
467                 adapter->num_rx_desc = ixgbe_rxd;
468
469         /* Allocate our TX/RX Queues */
470         if (ixgbe_allocate_queues(adapter)) {
471                 error = ENOMEM;
472                 goto err_out;
473         }
474
475         /* Allocate multicast array memory. */
476         adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
477             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
478         if (adapter->mta == NULL) {
479                 device_printf(dev, "Can not allocate multicast setup array\n");
480                 error = ENOMEM;
481                 goto err_late;
482         }
483
484         /* Initialize the shared code */
485         hw->allow_unsupported_sfp = allow_unsupported_sfp;
486         error = ixgbe_init_shared_code(hw);
487         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
488                 /*
489         ** No optics in this port; set up
490                 ** so the timer routine will probe 
491                 ** for later insertion.
492                 */
493                 adapter->sfp_probe = TRUE;
494                 error = 0;
495         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
496                 device_printf(dev,"Unsupported SFP+ module detected!\n");
497                 error = EIO;
498                 goto err_late;
499         } else if (error) {
500                 device_printf(dev,"Unable to initialize the shared code\n");
501                 error = EIO;
502                 goto err_late;
503         }
504
505         /* Make sure we have a good EEPROM before we read from it */
506         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
507                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
508                 error = EIO;
509                 goto err_late;
510         }
511
512         error = ixgbe_init_hw(hw);
513         switch (error) {
514         case IXGBE_ERR_EEPROM_VERSION:
515                 device_printf(dev, "This device is a pre-production adapter/"
516                     "LOM.  Please be aware there may be issues associated "
517                     "with your hardware.\n If you are experiencing problems "
518                     "please contact your Intel or hardware representative "
519                     "who provided you with this hardware.\n");
520                 break;
521         case IXGBE_ERR_SFP_NOT_SUPPORTED:
522                 device_printf(dev,"Unsupported SFP+ Module\n");
523                 error = EIO;
524                 goto err_late;
525         case IXGBE_ERR_SFP_NOT_PRESENT:
526                 device_printf(dev,"No SFP+ Module found\n");
527                 /* falls thru */
528         default:
529                 break;
530         }
531
532         /* Detect and set physical type */
533         ixgbe_setup_optics(adapter);
534
535         if ((adapter->msix > 1) && (ixgbe_enable_msix))
536                 error = ixgbe_allocate_msix(adapter); 
537         else
538                 error = ixgbe_allocate_legacy(adapter); 
539         if (error) 
540                 goto err_late;
541
542         /* Setup OS specific network interface */
543         if (ixgbe_setup_interface(dev, adapter) != 0)
544                 goto err_late;
545
546         /* Initialize statistics */
547         ixgbe_update_stats_counters(adapter);
548
549         /* Register for VLAN events */
550         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
551             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
552         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
553             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
554
555         /* Check PCIE slot type/speed/width */
556         ixgbe_get_slot_info(hw);
557
558         /* Set an initial default flow control value */
559         adapter->fc = ixgbe_fc_full;
560
561         /* Check for certain supported features */
562         ixgbe_check_wol_support(adapter);
563         ixgbe_check_eee_support(adapter);
564
565         /* Add sysctls */
566         ixgbe_add_device_sysctls(adapter);
567         ixgbe_add_hw_stats(adapter);
568
569         /* let hardware know driver is loaded */
570         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
571         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
572         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
573
574 #ifdef DEV_NETMAP
575         ixgbe_netmap_attach(adapter);
576 #endif /* DEV_NETMAP */
577         INIT_DEBUGOUT("ixgbe_attach: end");
578         return (0);
579
580 err_late:
581         ixgbe_free_transmit_structures(adapter);
582         ixgbe_free_receive_structures(adapter);
583 err_out:
584         if (adapter->ifp != NULL)
585                 if_free(adapter->ifp);
586         ixgbe_free_pci_resources(adapter);
587         free(adapter->mta, M_DEVBUF);
588         return (error);
589 }
590
591 /*********************************************************************
592  *  Device removal routine
593  *
594  *  The detach entry point is called when the driver is being removed.
595  *  This routine stops the adapter and deallocates all the resources
596  *  that were allocated for driver operation.
597  *
598  *  return 0 on success, positive on failure
599  *********************************************************************/
600
601 static int
602 ixgbe_detach(device_t dev)
603 {
604         struct adapter *adapter = device_get_softc(dev);
605         struct ix_queue *que = adapter->queues;
606         struct tx_ring *txr = adapter->tx_rings;
607         u32     ctrl_ext;
608
609         INIT_DEBUGOUT("ixgbe_detach: begin");
610
611         /* Make sure VLANs are not using the driver */
612         if (adapter->ifp->if_vlantrunk != NULL) {
613                 device_printf(dev,"Vlan in use, detach first\n");
614                 return (EBUSY);
615         }
616
617         /* Stop the adapter */
618         IXGBE_CORE_LOCK(adapter);
619         ixgbe_setup_low_power_mode(adapter);
620         IXGBE_CORE_UNLOCK(adapter);
621
622         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
623                 if (que->tq) {
624 #ifndef IXGBE_LEGACY_TX
625                         taskqueue_drain(que->tq, &txr->txq_task);
626 #endif
627                         taskqueue_drain(que->tq, &que->que_task);
628                         taskqueue_free(que->tq);
629                 }
630         }
631
632         /* Drain the Link queue */
633         if (adapter->tq) {
634                 taskqueue_drain(adapter->tq, &adapter->link_task);
635                 taskqueue_drain(adapter->tq, &adapter->mod_task);
636                 taskqueue_drain(adapter->tq, &adapter->msf_task);
637                 taskqueue_drain(adapter->tq, &adapter->phy_task);
638 #ifdef IXGBE_FDIR
639                 taskqueue_drain(adapter->tq, &adapter->fdir_task);
640 #endif
641                 taskqueue_free(adapter->tq);
642         }
643
644         /* let hardware know driver is unloading */
645         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
646         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
647         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
648
649         /* Unregister VLAN events */
650         if (adapter->vlan_attach != NULL)
651                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
652         if (adapter->vlan_detach != NULL)
653                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
654
655         ether_ifdetach(adapter->ifp);
656         callout_drain(&adapter->timer);
657 #ifdef DEV_NETMAP
658         netmap_detach(adapter->ifp);
659 #endif /* DEV_NETMAP */
660         ixgbe_free_pci_resources(adapter);
661         bus_generic_detach(dev);
662         if_free(adapter->ifp);
663
664         ixgbe_free_transmit_structures(adapter);
665         ixgbe_free_receive_structures(adapter);
666         free(adapter->mta, M_DEVBUF);
667
668         IXGBE_CORE_LOCK_DESTROY(adapter);
669         return (0);
670 }
671
672 /*********************************************************************
673  *
674  *  Shutdown entry point
675  *
676  **********************************************************************/
677
678 static int
679 ixgbe_shutdown(device_t dev)
680 {
681         struct adapter *adapter = device_get_softc(dev);
682         int error = 0;
683
684         INIT_DEBUGOUT("ixgbe_shutdown: begin");
685
686         IXGBE_CORE_LOCK(adapter);
687         error = ixgbe_setup_low_power_mode(adapter);
688         IXGBE_CORE_UNLOCK(adapter);
689
690         return (error);
691 }
692
693 /**
694  * Methods for going from:
695  * D0 -> D3: ixgbe_suspend
696  * D3 -> D0: ixgbe_resume
697  */
698 static int
699 ixgbe_suspend(device_t dev)
700 {
701         struct adapter *adapter = device_get_softc(dev);
702         int error = 0;
703
704         INIT_DEBUGOUT("ixgbe_suspend: begin");
705
706         IXGBE_CORE_LOCK(adapter);
707
708         error = ixgbe_setup_low_power_mode(adapter);
709
710         /* Save state and power down */
711         pci_save_state(dev);
712         pci_set_powerstate(dev, PCI_POWERSTATE_D3);
713
714         IXGBE_CORE_UNLOCK(adapter);
715
716         return (error);
717 }
718
719 static int
720 ixgbe_resume(device_t dev)
721 {
722         struct adapter *adapter = device_get_softc(dev);
723         struct ifnet *ifp = adapter->ifp;
724         struct ixgbe_hw *hw = &adapter->hw;
725         u32 wus;
726
727         INIT_DEBUGOUT("ixgbe_resume: begin");
728
729         IXGBE_CORE_LOCK(adapter);
730
731         pci_set_powerstate(dev, PCI_POWERSTATE_D0);
732         pci_restore_state(dev);
733
734         /* Read & clear WUS register */
735         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
736         if (wus)
737                 device_printf(dev, "Woken up by (WUS): %#010x\n",
738                     IXGBE_READ_REG(hw, IXGBE_WUS));
739         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
740         /* And clear WUFC until next low-power transition */
741         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
742
743         /*
744          * Required after D3->D0 transition;
745          * will re-advertise all previously advertised speeds
746          */
747         if (ifp->if_flags & IFF_UP)
748                 ixgbe_init_locked(adapter);
749
750         IXGBE_CORE_UNLOCK(adapter);
751
752         INIT_DEBUGOUT("ixgbe_resume: end");
753         return (0);
754 }
755
756
757 /*********************************************************************
758  *  Ioctl entry point
759  *
760  *  ixgbe_ioctl is called when the user wants to configure the
761  *  interface.
762  *
763  *  return 0 on success, positive on failure
764  **********************************************************************/
765
766 static int
767 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
768 {
769         struct adapter  *adapter = ifp->if_softc;
770         struct ifreq    *ifr = (struct ifreq *) data;
771 #if defined(INET) || defined(INET6)
772         struct ifaddr *ifa = (struct ifaddr *)data;
773         bool            avoid_reset = FALSE;
774 #endif
775         int             error = 0;
776
777         switch (command) {
778
779         case SIOCSIFADDR:
780 #ifdef INET
781                 if (ifa->ifa_addr->sa_family == AF_INET)
782                         avoid_reset = TRUE;
783 #endif
784 #ifdef INET6
785                 if (ifa->ifa_addr->sa_family == AF_INET6)
786                         avoid_reset = TRUE;
787 #endif
788 #if defined(INET) || defined(INET6)
789                 /*
790                 ** Calling init results in link renegotiation,
791                 ** so we avoid doing it when possible.
792                 */
793                 if (avoid_reset) {
794                         ifp->if_flags |= IFF_UP;
795                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
796                                 ixgbe_init(adapter);
797                         if (!(ifp->if_flags & IFF_NOARP))
798                                 arp_ifinit(ifp, ifa);
799                 } else
800                         error = ether_ioctl(ifp, command, data);
801 #endif
802                 break;
803         case SIOCSIFMTU:
804                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
805                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
806                         error = EINVAL;
807                 } else {
808                         IXGBE_CORE_LOCK(adapter);
809                         ifp->if_mtu = ifr->ifr_mtu;
810                         adapter->max_frame_size =
811                                 ifp->if_mtu + IXGBE_MTU_HDR;
812                         ixgbe_init_locked(adapter);
813                         IXGBE_CORE_UNLOCK(adapter);
814                 }
815                 break;
816         case SIOCSIFFLAGS:
817                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
818                 IXGBE_CORE_LOCK(adapter);
819                 if (ifp->if_flags & IFF_UP) {
820                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
821                                 if ((ifp->if_flags ^ adapter->if_flags) &
822                                     (IFF_PROMISC | IFF_ALLMULTI)) {
823                                         ixgbe_set_promisc(adapter);
824                                 }
825                         } else
826                                 ixgbe_init_locked(adapter);
827                 } else
828                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
829                                 ixgbe_stop(adapter);
830                 adapter->if_flags = ifp->if_flags;
831                 IXGBE_CORE_UNLOCK(adapter);
832                 break;
833         case SIOCADDMULTI:
834         case SIOCDELMULTI:
835                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
836                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
837                         IXGBE_CORE_LOCK(adapter);
838                         ixgbe_disable_intr(adapter);
839                         ixgbe_set_multi(adapter);
840                         ixgbe_enable_intr(adapter);
841                         IXGBE_CORE_UNLOCK(adapter);
842                 }
843                 break;
844         case SIOCSIFMEDIA:
845         case SIOCGIFMEDIA:
846                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
847                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
848                 break;
849         case SIOCSIFCAP:
850         {
851                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
852                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
853                 if (mask & IFCAP_HWCSUM)
854                         ifp->if_capenable ^= IFCAP_HWCSUM;
855                 if (mask & IFCAP_TSO4)
856                         ifp->if_capenable ^= IFCAP_TSO4;
857                 if (mask & IFCAP_TSO6)
858                         ifp->if_capenable ^= IFCAP_TSO6;
859                 if (mask & IFCAP_LRO)
860                         ifp->if_capenable ^= IFCAP_LRO;
861                 if (mask & IFCAP_VLAN_HWTAGGING)
862                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
863                 if (mask & IFCAP_VLAN_HWFILTER)
864                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
865                 if (mask & IFCAP_VLAN_HWTSO)
866                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
867                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
868                         IXGBE_CORE_LOCK(adapter);
869                         ixgbe_init_locked(adapter);
870                         IXGBE_CORE_UNLOCK(adapter);
871                 }
872                 VLAN_CAPABILITIES(ifp);
873                 break;
874         }
875 #if __FreeBSD_version >= 1100036
876         case SIOCGI2C:
877         {
878                 struct ixgbe_hw *hw = &adapter->hw;
879                 struct ifi2creq i2c;
880                 int i;
881                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
882                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
883                 if (error != 0)
884                         break;
885                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
886                         error = EINVAL;
887                         break;
888                 }
889                 if (i2c.len > sizeof(i2c.data)) {
890                         error = EINVAL;
891                         break;
892                 }
893
894                 for (i = 0; i < i2c.len; i++)
895                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
896                             i2c.dev_addr, &i2c.data[i]);
897                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
898                 break;
899         }
900 #endif
901         default:
902                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
903                 error = ether_ioctl(ifp, command, data);
904                 break;
905         }
906
907         return (error);
908 }
909
910 /*********************************************************************
911  *  Init entry point
912  *
913  *  This routine is used in two ways. It is used by the stack as
914  *  the init entry point in the network interface structure. It is also used
915  *  by the driver as a hw/sw initialization routine to get to a
916  *  consistent state.
917  *
918  *  return 0 on success, positive on failure
919  **********************************************************************/
920 #define IXGBE_MHADD_MFS_SHIFT 16
921
922 static void
923 ixgbe_init_locked(struct adapter *adapter)
924 {
925         struct ifnet   *ifp = adapter->ifp;
926         device_t        dev = adapter->dev;
927         struct ixgbe_hw *hw = &adapter->hw;
928         u32             k, txdctl, mhadd, gpie;
929         u32             rxdctl, rxctrl;
930
931         mtx_assert(&adapter->core_mtx, MA_OWNED);
932         INIT_DEBUGOUT("ixgbe_init_locked: begin");
933         hw->adapter_stopped = FALSE;
934         ixgbe_stop_adapter(hw);
935         callout_stop(&adapter->timer);
936
937         /* reprogram the RAR[0] in case user changed it. */
938         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
939
940         /* Get the latest MAC address; the user may have set a LAA */
941         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
942               IXGBE_ETH_LENGTH_OF_ADDRESS);
943         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
944         hw->addr_ctrl.rar_used_count = 1;
945
946         /* Set the various hardware offload abilities */
947         ifp->if_hwassist = 0;
948         if (ifp->if_capenable & IFCAP_TSO)
949                 ifp->if_hwassist |= CSUM_TSO;
950         if (ifp->if_capenable & IFCAP_TXCSUM) {
951                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
952 #if __FreeBSD_version >= 800000
953                 if (hw->mac.type != ixgbe_mac_82598EB)
954                         ifp->if_hwassist |= CSUM_SCTP;
955 #endif
956         }
957
958         /* Prepare transmit descriptors and buffers */
959         if (ixgbe_setup_transmit_structures(adapter)) {
960                 device_printf(dev, "Could not setup transmit structures\n");
961                 ixgbe_stop(adapter);
962                 return;
963         }
964
965         ixgbe_init_hw(hw);
966         ixgbe_initialize_transmit_units(adapter);
967
968         /* Setup Multicast table */
969         ixgbe_set_multi(adapter);
970
971         /*
972         ** Determine the correct mbuf pool
973         ** for doing jumbo frames
974         */
975         if (adapter->max_frame_size <= 2048)
976                 adapter->rx_mbuf_sz = MCLBYTES;
977         else if (adapter->max_frame_size <= 4096)
978                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
979         else if (adapter->max_frame_size <= 9216)
980                 adapter->rx_mbuf_sz = MJUM9BYTES;
981         else
982                 adapter->rx_mbuf_sz = MJUM16BYTES;
983
984         /* Prepare receive descriptors and buffers */
985         if (ixgbe_setup_receive_structures(adapter)) {
986                 device_printf(dev, "Could not setup receive structures\n");
987                 ixgbe_stop(adapter);
988                 return;
989         }
990
991         /* Configure RX settings */
992         ixgbe_initialize_receive_units(adapter);
993
994         gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
995
996         /* Enable Fan Failure Interrupt */
997         gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
998
999         /* Add for Module detection */
1000         if (hw->mac.type == ixgbe_mac_82599EB)
1001                 gpie |= IXGBE_SDP2_GPIEN;
1002
1003         /*
1004          * Thermal Failure Detection (X540)
1005          * Link Detection (X552)
1006          */
1007         if (hw->mac.type == ixgbe_mac_X540 ||
1008             hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1009             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1010                 gpie |= IXGBE_SDP0_GPIEN_X540;
1011
1012         if (adapter->msix > 1) {
1013                 /* Enable Enhanced MSIX mode */
1014                 gpie |= IXGBE_GPIE_MSIX_MODE;
1015                 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1016                     IXGBE_GPIE_OCD;
1017         }
1018         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1019
1020         /* Set MTU size */
1021         if (ifp->if_mtu > ETHERMTU) {
1022                 /* aka IXGBE_MAXFRS on 82599 and newer */
1023                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1024                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1025                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1026                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1027         }
1028         
1029         /* Now enable all the queues */
1030         for (int i = 0; i < adapter->num_queues; i++) {
1031                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1032                 txdctl |= IXGBE_TXDCTL_ENABLE;
1033                 /* Set WTHRESH to 8, burst writeback */
1034                 txdctl |= (8 << 16);
1035                 /*
1036                  * When the internal queue falls below PTHRESH (32),
1037                  * start prefetching as long as there are at least
1038                  * HTHRESH (1) buffers ready. The values are taken
1039                  * from the Intel linux driver 3.8.21.
1040                  * Prefetching enables tx line rate even with 1 queue.
1041                  */
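                /*
                ** PTHRESH occupies the low byte of TXDCTL, HTHRESH the
                ** second byte and WTHRESH the third, which is what the
                ** << 0, << 8 and << 16 shifts select.
                */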
1042                 txdctl |= (32 << 0) | (1 << 8);
1043                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1044         }
1045
1046         for (int i = 0; i < adapter->num_queues; i++) {
1047                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1048                 if (hw->mac.type == ixgbe_mac_82598EB) {
1049                         /*
1050                         ** PTHRESH = 21
1051                         ** HTHRESH = 4
1052                         ** WTHRESH = 8
1053                         */
1054                         rxdctl &= ~0x3FFFFF;
1055                         rxdctl |= 0x080420;
1056                 }
1057                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1058                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1059                 for (k = 0; k < 10; k++) {
1060                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1061                             IXGBE_RXDCTL_ENABLE)
1062                                 break;
1063                         else
1064                                 msec_delay(1);
1065                 }
1066                 wmb();
1067 #ifdef DEV_NETMAP
1068                 /*
1069                  * In netmap mode, we must preserve the buffers made
1070                  * available to userspace before the if_init()
1071                  * (this is true by default on the TX side, because
1072                  * init makes all buffers available to userspace).
1073                  *
1074                  * netmap_reset() and the device specific routines
1075                  * (e.g. ixgbe_setup_receive_rings()) map these
1076                  * buffers at the end of the NIC ring, so here we
1077                  * must set the RDT (tail) register to make sure
1078                  * they are not overwritten.
1079                  *
1080                  * In this driver the NIC ring starts at RDH = 0,
1081                  * RDT points to the last slot available for reception (?),
1082                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1083                  */
1084                 if (ifp->if_capenable & IFCAP_NETMAP) {
1085                         struct netmap_adapter *na = NA(adapter->ifp);
1086                         struct netmap_kring *kring = &na->rx_rings[i];
1087                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1088
1089                         IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1090                 } else
1091 #endif /* DEV_NETMAP */
1092                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1093         }
1094
1095         /* Enable Receive engine */
1096         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1097         if (hw->mac.type == ixgbe_mac_82598EB)
1098                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1099         rxctrl |= IXGBE_RXCTRL_RXEN;
1100         ixgbe_enable_rx_dma(hw, rxctrl);
1101
1102         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1103
1104         /* Set up MSI/X routing */
1105         if (ixgbe_enable_msix)  {
1106                 ixgbe_configure_ivars(adapter);
1107                 /* Set up auto-mask */
1108                 if (hw->mac.type == ixgbe_mac_82598EB)
1109                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1110                 else {
1111                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1112                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1113                 }
1114         } else {  /* Simple settings for Legacy/MSI */
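                /*
                ** Route both the RX (type 0) and TX (type 1) causes of
                ** queue 0 to vector 0, since a single vector is shared.
                */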
1115                 ixgbe_set_ivar(adapter, 0, 0, 0);
1116                 ixgbe_set_ivar(adapter, 0, 0, 1);
1117                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1118         }
1119
1120 #ifdef IXGBE_FDIR
1121         /* Init Flow director */
1122         if (hw->mac.type != ixgbe_mac_82598EB) {
1123                 u32 hdrm = 32 << fdir_pballoc;
1124
1125                 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1126                 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1127         }
1128 #endif
1129
1130         /*
1131         ** Check on any SFP devices that
1132         ** need to be kick-started
1133         */
1134         if (hw->phy.type == ixgbe_phy_none) {
1135                 int err = hw->phy.ops.identify(hw);
1136                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1137                         device_printf(dev,
1138                             "Unsupported SFP+ module type was detected.\n");
1139                         return;
1140                 }
1141         }
1142
1143         /* Set moderation on the Link interrupt */
1144         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1145
1146         /* Configure Energy Efficient Ethernet for supported devices */
1147         if (adapter->eee_support)
1148                 ixgbe_setup_eee(hw, adapter->eee_enabled);
1149
1150         /* Config/Enable Link */
1151         ixgbe_config_link(adapter);
1152
1153         /* Hardware Packet Buffer & Flow Control setup */
1154         ixgbe_config_delay_values(adapter);
1155
1156         /* Initialize the FC settings */
1157         ixgbe_start_hw(hw);
1158
1159         /* Set up VLAN support and filter */
1160         ixgbe_setup_vlan_hw_support(adapter);
1161
1162         /* Setup DMA Coalescing */
1163         ixgbe_config_dmac(adapter);
1164
1165         /* And now turn on interrupts */
1166         ixgbe_enable_intr(adapter);
1167
1168         /* Now inform the stack we're ready */
1169         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1170
1171         return;
1172 }
1173
1174 static void
1175 ixgbe_init(void *arg)
1176 {
1177         struct adapter *adapter = arg;
1178
1179         IXGBE_CORE_LOCK(adapter);
1180         ixgbe_init_locked(adapter);
1181         IXGBE_CORE_UNLOCK(adapter);
1182         return;
1183 }
1184
1185 static void
1186 ixgbe_config_delay_values(struct adapter *adapter)
1187 {
1188         struct ixgbe_hw *hw = &adapter->hw;
1189         u32 rxpb, frame, size, tmp;
1190
1191         frame = adapter->max_frame_size;
1192
1193         /* Calculate High Water */
1194         switch (hw->mac.type) {
1195         case ixgbe_mac_X540:
1196         case ixgbe_mac_X550:
1197         case ixgbe_mac_X550EM_x:
1198                 tmp = IXGBE_DV_X540(frame, frame);
1199                 break;
1200         default:
1201                 tmp = IXGBE_DV(frame, frame);
1202                 break;
1203         }
1204         size = IXGBE_BT2KB(tmp);
1205         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1206         hw->fc.high_water[0] = rxpb - size;
1207
1208         /* Now calculate Low Water */
1209         switch (hw->mac.type) {
1210         case ixgbe_mac_X540:
1211         case ixgbe_mac_X550:
1212         case ixgbe_mac_X550EM_x:
1213                 tmp = IXGBE_LOW_DV_X540(frame);
1214                 break;
1215         default:
1216                 tmp = IXGBE_LOW_DV(frame);
1217                 break;
1218         }
1219         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1220
1221         hw->fc.requested_mode = adapter->fc;
1222         hw->fc.pause_time = IXGBE_FC_PAUSE;
1223         hw->fc.send_xon = TRUE;
1224 }
1225
1226 /*
1227 **
1228 ** MSIX Interrupt Handlers and Tasklets
1229 **
1230 */
1231
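/*
** The two helpers below enable/disable the interrupt for a single
** MSI-X vector.  Each vector corresponds to one bit in the Extended
** Interrupt Mask registers: setting the bit in EIMS enables the
** vector, while setting it in EIMC masks it.  On 82599 and later
** parts the 64 possible vectors are split across the EIMS_EX/EIMC_EX
** register pairs; the 82598 only implements the single 32-bit
** registers.
*/
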
1232 static inline void
1233 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1234 {
1235         struct ixgbe_hw *hw = &adapter->hw;
1236         u64     queue = ((u64)1 << vector);
1237         u32     mask;
1238
1239         if (hw->mac.type == ixgbe_mac_82598EB) {
1240                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1241                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1242         } else {
1243                 mask = (queue & 0xFFFFFFFF);
1244                 if (mask)
1245                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1246                 mask = (queue >> 32);
1247                 if (mask)
1248                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1249         }
1250 }
1251
1252 static inline void
1253 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1254 {
1255         struct ixgbe_hw *hw = &adapter->hw;
1256         u64     queue = ((u64)1 << vector);
1257         u32     mask;
1258
1259         if (hw->mac.type == ixgbe_mac_82598EB) {
1260                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1261                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1262         } else {
1263                 mask = (queue & 0xFFFFFFFF);
1264                 if (mask)
1265                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1266                 mask = (queue >> 32);
1267                 if (mask)
1268                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1269         }
1270 }
1271
1272 static void
1273 ixgbe_handle_que(void *context, int pending)
1274 {
1275         struct ix_queue *que = context;
1276         struct adapter  *adapter = que->adapter;
1277         struct tx_ring  *txr = que->txr;
1278         struct ifnet    *ifp = adapter->ifp;
1279         bool            more;
1280
1281         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1282                 more = ixgbe_rxeof(que);
1283                 IXGBE_TX_LOCK(txr);
1284                 ixgbe_txeof(txr);
1285 #ifndef IXGBE_LEGACY_TX
1286                 if (!drbr_empty(ifp, txr->br))
1287                         ixgbe_mq_start_locked(ifp, txr);
1288 #else
1289                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1290                         ixgbe_start_locked(txr, ifp);
1291 #endif
1292                 IXGBE_TX_UNLOCK(txr);
1293         }
1294
1295         /* Reenable this interrupt */
1296         if (que->res != NULL)
1297                 ixgbe_enable_queue(adapter, que->msix);
1298         else
1299                 ixgbe_enable_intr(adapter);
1300         return;
1301 }
1302
1303
1304 /*********************************************************************
1305  *
1306  *  Legacy Interrupt Service routine
1307  *
1308  **********************************************************************/
1309
1310 static void
1311 ixgbe_legacy_irq(void *arg)
1312 {
1313         struct ix_queue *que = arg;
1314         struct adapter  *adapter = que->adapter;
1315         struct ixgbe_hw *hw = &adapter->hw;
1316         struct ifnet    *ifp = adapter->ifp;
1317         struct          tx_ring *txr = adapter->tx_rings;
1318         bool            more;
1319         u32             reg_eicr;
1320
1321
1322         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1323
1324         ++que->irqs;
1325         if (reg_eicr == 0) {
1326                 ixgbe_enable_intr(adapter);
1327                 return;
1328         }
1329
1330         more = ixgbe_rxeof(que);
1331
1332         IXGBE_TX_LOCK(txr);
1333         ixgbe_txeof(txr);
1334 #ifdef IXGBE_LEGACY_TX
1335         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1336                 ixgbe_start_locked(txr, ifp);
1337 #else
1338         if (!drbr_empty(ifp, txr->br))
1339                 ixgbe_mq_start_locked(ifp, txr);
1340 #endif
1341         IXGBE_TX_UNLOCK(txr);
1342
1343         /* Check for fan failure */
1344         if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1345             (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1346                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1347                     "REPLACE IMMEDIATELY!!\n");
1348                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1349         }
1350
1351         /* Link status change */
1352         if (reg_eicr & IXGBE_EICR_LSC)
1353                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1354
1355         /* External PHY interrupt */
1356         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1357             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1358                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1359
1360         if (more)
1361                 taskqueue_enqueue(que->tq, &que->que_task);
1362         else
1363                 ixgbe_enable_intr(adapter);
1364         return;
1365 }
1366
1367
1368 /*********************************************************************
1369  *
1370  *  MSIX Queue Interrupt Service routine
1371  *
1372  **********************************************************************/
1373 void
1374 ixgbe_msix_que(void *arg)
1375 {
1376         struct ix_queue *que = arg;
1377         struct adapter  *adapter = que->adapter;
1378         struct ifnet    *ifp = adapter->ifp;
1379         struct tx_ring  *txr = que->txr;
1380         struct rx_ring  *rxr = que->rxr;
1381         bool            more;
1382         u32             newitr = 0;
1383
1384         /* Protect against spurious interrupts */
1385         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1386                 return;
1387
1388         ixgbe_disable_queue(adapter, que->msix);
1389         ++que->irqs;
1390
1391         more = ixgbe_rxeof(que);
1392
1393         IXGBE_TX_LOCK(txr);
1394         ixgbe_txeof(txr);
1395 #ifdef IXGBE_LEGACY_TX
1396         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1397                 ixgbe_start_locked(txr, ifp);
1398 #else
1399         if (!drbr_empty(ifp, txr->br))
1400                 ixgbe_mq_start_locked(ifp, txr);
1401 #endif
1402         IXGBE_TX_UNLOCK(txr);
1403
1404         /* Do AIM now? */
1405
1406         if (ixgbe_enable_aim == FALSE)
1407                 goto no_calc;
1408         /*
1409         ** Do Adaptive Interrupt Moderation:
1410         **  - Write out last calculated setting
1411         **  - Calculate based on the average packet size over
1412         **    the last interval.
1413         */
1414         if (que->eitr_setting)
1415                 IXGBE_WRITE_REG(&adapter->hw,
1416                     IXGBE_EITR(que->msix), que->eitr_setting);
1417  
1418         que->eitr_setting = 0;
1419
1420         /* Idle, do nothing */
1421         if ((txr->bytes == 0) && (rxr->bytes == 0))
1422                 goto no_calc;
1423                                 
1424         if ((txr->bytes) && (txr->packets))
1425                 newitr = txr->bytes/txr->packets;
1426         if ((rxr->bytes) && (rxr->packets))
1427                 newitr = max(newitr,
1428                     (rxr->bytes / rxr->packets));
1429         newitr += 24; /* account for hardware frame, crc */
1430
1431         /* set an upper boundary */
1432         newitr = min(newitr, 3000);
1433
1434         /* Be nice to the mid range */
1435         if ((newitr > 300) && (newitr < 1200))
1436                 newitr = (newitr / 3);
1437         else
1438                 newitr = (newitr / 2);
1439
1440         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1441                 newitr |= newitr << 16;
1442         else
1443                 newitr |= IXGBE_EITR_CNT_WDIS;
1444                  
1445         /* save for next interrupt */
1446         que->eitr_setting = newitr;
1447
1448         /* Reset state */
1449         txr->bytes = 0;
1450         txr->packets = 0;
1451         rxr->bytes = 0;
1452         rxr->packets = 0;
1453
1454 no_calc:
1455         if (more)
1456                 taskqueue_enqueue(que->tq, &que->que_task);
1457         else
1458                 ixgbe_enable_queue(adapter, que->msix);
1459         return;
1460 }
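
/*
** Illustrative sketch, not part of the driver build: the adaptive
** interrupt moderation math above, reduced to a standalone helper
** (aim_next_eitr() and AIM_CNT_WDIS are names made up for this sketch;
** the WDIS value mirrors what IXGBE_EITR_CNT_WDIS is assumed to be).
** Worked example: 64 TX packets totalling 96000 bytes average 1500
** bytes; +24 for framing/CRC gives 1524, which is under the 3000 cap
** and above the 300..1200 mid range, so it is halved to 762.
*/
#if 0	/* example only */
#include <stdint.h>

#define AIM_CNT_WDIS	0x80000000u	/* assumed IXGBE_EITR_CNT_WDIS */

static uint32_t
aim_next_eitr(uint64_t tx_bytes, uint64_t tx_pkts,
    uint64_t rx_bytes, uint64_t rx_pkts, int is_82598)
{
	uint32_t newitr = 0;

	if (tx_bytes && tx_pkts)
		newitr = (uint32_t)(tx_bytes / tx_pkts);
	if (rx_bytes && rx_pkts) {
		uint32_t rxavg = (uint32_t)(rx_bytes / rx_pkts);
		if (rxavg > newitr)
			newitr = rxavg;
	}
	newitr += 24;				/* hardware frame, crc */
	if (newitr > 3000)			/* upper boundary */
		newitr = 3000;
	if (newitr > 300 && newitr < 1200)	/* be nice to the mid range */
		newitr /= 3;
	else
		newitr /= 2;
	/* 82598 repeats the interval in the high half; newer MACs set WDIS */
	return (is_82598 ? (newitr | (newitr << 16)) : (newitr | AIM_CNT_WDIS));
}
#endif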
1461
1462
1463 static void
1464 ixgbe_msix_link(void *arg)
1465 {
1466         struct adapter  *adapter = arg;
1467         struct ixgbe_hw *hw = &adapter->hw;
1468         u32             reg_eicr, mod_mask;
1469
1470         ++adapter->link_irq;
1471
1472         /* First get the cause */
1473         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1474         /* Be sure the queue bits are not cleared */
1475         reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1476         /* Clear interrupt with write */
1477         IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1478
1479         /* Link status change */
1480         if (reg_eicr & IXGBE_EICR_LSC)
1481                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1482
1483         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1484 #ifdef IXGBE_FDIR
1485                 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1486                         /* This is probably overkill :) */
1487                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1488                                 return;
1489                         /* Disable the interrupt */
1490                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1491                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1492                 } else
1493 #endif
1494                 if (reg_eicr & IXGBE_EICR_ECC) {
1495                         device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1496                             "Please Reboot!!\n");
1497                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1498                 }
1499
1500                 /* Check for over temp condition */
1501                 if (reg_eicr & IXGBE_EICR_TS) {
1502                         device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1503                             "PHY IS SHUT DOWN!!\n");
1504                         device_printf(adapter->dev, "System shutdown required!\n");
1505                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1506                 }
1507         }
1508
1509         /* Pluggable optics-related interrupt */
1510         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1511                 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1512         else
1513                 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1514
1515         if (ixgbe_is_sfp(hw)) {
1516                 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1517                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1518                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1519                 } else if (reg_eicr & mod_mask) {
1520                         IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1521                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1522                 }
1523         }
1524
1525         /* Check for fan failure */
1526         if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1527             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1528                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1529                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1530                     "REPLACE IMMEDIATELY!!\n");
1531         }
1532
1533         /* External PHY interrupt */
1534         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1535             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1536                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1537                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1538         }
1539
1540         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1541         return;
1542 }
1543
1544 /*********************************************************************
1545  *
1546  *  Media Ioctl callback
1547  *
1548  *  This routine is called whenever the user queries the status of
1549  *  the interface using ifconfig.
1550  *
1551  **********************************************************************/
1552 static void
1553 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1554 {
1555         struct adapter *adapter = ifp->if_softc;
1556         struct ixgbe_hw *hw = &adapter->hw;
1557         int layer;
1558
1559         INIT_DEBUGOUT("ixgbe_media_status: begin");
1560         IXGBE_CORE_LOCK(adapter);
1561         ixgbe_update_link_status(adapter);
1562
1563         ifmr->ifm_status = IFM_AVALID;
1564         ifmr->ifm_active = IFM_ETHER;
1565
1566         if (!adapter->link_active) {
1567                 IXGBE_CORE_UNLOCK(adapter);
1568                 return;
1569         }
1570
1571         ifmr->ifm_status |= IFM_ACTIVE;
1572         layer = ixgbe_get_supported_physical_layer(hw);
1573
1574         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1575             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1576             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1577                 switch (adapter->link_speed) {
1578                 case IXGBE_LINK_SPEED_10GB_FULL:
1579                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1580                         break;
1581                 case IXGBE_LINK_SPEED_1GB_FULL:
1582                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1583                         break;
1584                 case IXGBE_LINK_SPEED_100_FULL:
1585                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1586                         break;
1587                 }
1588         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1589             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1590                 switch (adapter->link_speed) {
1591                 case IXGBE_LINK_SPEED_10GB_FULL:
1592                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1593                         break;
1594                 }
1595         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1596                 switch (adapter->link_speed) {
1597                 case IXGBE_LINK_SPEED_10GB_FULL:
1598                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1599                         break;
1600                 case IXGBE_LINK_SPEED_1GB_FULL:
1601                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1602                         break;
1603                 }
1604         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1605                 switch (adapter->link_speed) {
1606                 case IXGBE_LINK_SPEED_10GB_FULL:
1607                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1608                         break;
1609                 case IXGBE_LINK_SPEED_1GB_FULL:
1610                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1611                         break;
1612                 }
1613         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1614             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1615                 switch (adapter->link_speed) {
1616                 case IXGBE_LINK_SPEED_10GB_FULL:
1617                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1618                         break;
1619                 case IXGBE_LINK_SPEED_1GB_FULL:
1620                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1621                         break;
1622                 }
1623         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1624                 switch (adapter->link_speed) {
1625                 case IXGBE_LINK_SPEED_10GB_FULL:
1626                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1627                         break;
1628                 }
1629         /*
1630         ** XXX: These need to use the proper media types once
1631         ** they're added.
1632         */
1633         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1634                 switch (adapter->link_speed) {
1635                 case IXGBE_LINK_SPEED_10GB_FULL:
1636                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1637                         break;
1638                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1639                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1640                         break;
1641                 case IXGBE_LINK_SPEED_1GB_FULL:
1642                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1643                         break;
1644                 }
1645         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1646             || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1647                 switch (adapter->link_speed) {
1648                 case IXGBE_LINK_SPEED_10GB_FULL:
1649                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1650                         break;
1651                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1652                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1653                         break;
1654                 case IXGBE_LINK_SPEED_1GB_FULL:
1655                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1656                         break;
1657                 }
1658         
1659         /* If nothing is recognized... */
1660         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1661                 ifmr->ifm_active |= IFM_UNKNOWN;
1662         
1663 #if __FreeBSD_version >= 900025
1664         /* Display current flow control setting used on link */
1665         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1666             hw->fc.current_mode == ixgbe_fc_full)
1667                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1668         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1669             hw->fc.current_mode == ixgbe_fc_full)
1670                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1671 #endif
1672
1673         IXGBE_CORE_UNLOCK(adapter);
1674
1675         return;
1676 }
1677
1678 /*********************************************************************
1679  *
1680  *  Media Ioctl callback
1681  *
1682  *  This routine is called when the user changes speed/duplex using
1683  *  media/mediaopt option with ifconfig.
1684  *
1685  **********************************************************************/
1686 static int
1687 ixgbe_media_change(struct ifnet * ifp)
1688 {
1689         struct adapter *adapter = ifp->if_softc;
1690         struct ifmedia *ifm = &adapter->media;
1691         struct ixgbe_hw *hw = &adapter->hw;
1692         ixgbe_link_speed speed = 0;
1693
1694         INIT_DEBUGOUT("ixgbe_media_change: begin");
1695
1696         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1697                 return (EINVAL);
1698
1699         if (hw->phy.media_type == ixgbe_media_type_backplane)
1700                 return (EPERM);
1701
1702         /*
1703         ** We don't actually need to check against the supported
1704         ** media types of the adapter; ifmedia will take care of
1705         ** that for us.
1706         */
1707         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1708                 case IFM_AUTO:
1709                 case IFM_10G_T:
1710                         speed |= IXGBE_LINK_SPEED_100_FULL;
1711                 case IFM_10G_LRM:
1712                 case IFM_10G_SR: /* KR, too */
1713                 case IFM_10G_LR:
1714                 case IFM_10G_CX4: /* KX4 */
1715                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1716                 case IFM_10G_TWINAX:
1717                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
1718                         break;
1719                 case IFM_1000_T:
1720                         speed |= IXGBE_LINK_SPEED_100_FULL;
1721                 case IFM_1000_LX:
1722                 case IFM_1000_SX:
1723                 case IFM_1000_CX: /* KX */
1724                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1725                         break;
1726                 case IFM_100_TX:
1727                         speed |= IXGBE_LINK_SPEED_100_FULL;
1728                         break;
1729                 default:
1730                         goto invalid;
1731         }
1732
1733         hw->mac.autotry_restart = TRUE;
1734         hw->mac.ops.setup_link(hw, speed, TRUE);
1735         adapter->advertise =
1736                 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1737                 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1738                 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1739
1740         return (0);
1741
1742 invalid:
1743         device_printf(adapter->dev, "Invalid media type!\n");
1744         return (EINVAL);
1745 }
1746
1747 static void
1748 ixgbe_set_promisc(struct adapter *adapter)
1749 {
1750         u_int32_t       reg_rctl;
1751         struct ifnet   *ifp = adapter->ifp;
1752         int             mcnt = 0;
1753
1754         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1755         reg_rctl &= (~IXGBE_FCTRL_UPE);
1756         if (ifp->if_flags & IFF_ALLMULTI)
1757                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1758         else {
1759                 struct  ifmultiaddr *ifma;
1760 #if __FreeBSD_version < 800000
1761                 IF_ADDR_LOCK(ifp);
1762 #else
1763                 if_maddr_rlock(ifp);
1764 #endif
1765                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1766                         if (ifma->ifma_addr->sa_family != AF_LINK)
1767                                 continue;
1768                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1769                                 break;
1770                         mcnt++;
1771                 }
1772 #if __FreeBSD_version < 800000
1773                 IF_ADDR_UNLOCK(ifp);
1774 #else
1775                 if_maddr_runlock(ifp);
1776 #endif
1777         }
1778         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1779                 reg_rctl &= (~IXGBE_FCTRL_MPE);
1780         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1781
1782         if (ifp->if_flags & IFF_PROMISC) {
1783                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1784                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1785         } else if (ifp->if_flags & IFF_ALLMULTI) {
1786                 reg_rctl |= IXGBE_FCTRL_MPE;
1787                 reg_rctl &= ~IXGBE_FCTRL_UPE;
1788                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1789         }
1790         return;
1791 }
1792
1793
1794 /*********************************************************************
1795  *  Multicast Update
1796  *
1797  *  This routine is called whenever multicast address list is updated.
1798  *
1799  **********************************************************************/
1800 #define IXGBE_RAR_ENTRIES 16
1801
1802 static void
1803 ixgbe_set_multi(struct adapter *adapter)
1804 {
1805         u32     fctrl;
1806         u8      *mta;
1807         u8      *update_ptr;
1808         struct  ifmultiaddr *ifma;
1809         int     mcnt = 0;
1810         struct ifnet   *ifp = adapter->ifp;
1811
1812         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1813
1814         mta = adapter->mta;
1815         bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1816             MAX_NUM_MULTICAST_ADDRESSES);
1817
1818 #if __FreeBSD_version < 800000
1819         IF_ADDR_LOCK(ifp);
1820 #else
1821         if_maddr_rlock(ifp);
1822 #endif
1823         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1824                 if (ifma->ifma_addr->sa_family != AF_LINK)
1825                         continue;
1826                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1827                         break;
1828                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1829                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1830                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1831                 mcnt++;
1832         }
1833 #if __FreeBSD_version < 800000
1834         IF_ADDR_UNLOCK(ifp);
1835 #else
1836         if_maddr_runlock(ifp);
1837 #endif
1838
1839         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1840         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1841         if (ifp->if_flags & IFF_PROMISC)
1842                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1844             ifp->if_flags & IFF_ALLMULTI) {
1845                 fctrl |= IXGBE_FCTRL_MPE;
1846                 fctrl &= ~IXGBE_FCTRL_UPE;
1847         } else
1848                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1849         
1850         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1851
1852         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1853                 update_ptr = mta;
1854                 ixgbe_update_mc_addr_list(&adapter->hw,
1855                     update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1856         }
1857
1858         return;
1859 }
1860
1861 /*
1862  * This is an iterator function needed by the multicast
1863  * shared code. It simply feeds the shared code routine the
1864  * addresses in the array built by ixgbe_set_multi(), one by one.
1865  */
1866 static u8 *
1867 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1868 {
1869         u8 *addr = *update_ptr;
1870         u8 *newptr;
1871         *vmdq = 0;
1872
1873         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1874         *update_ptr = newptr;
1875         return addr;
1876 }
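
/*
** Illustrative sketch, not part of the driver build: the iterator
** contract assumed above.  The real consumer is the shared code's
** ixgbe_update_mc_addr_list(), whose internals are not shown here;
** mc_next() and MC_ADDR_LEN are names made up for this sketch.  Each
** call hands back the current 6-byte address and advances the caller's
** cursor to the next entry in the flat array built by ixgbe_set_multi().
*/
#if 0	/* example only */
#include <stdint.h>

#define MC_ADDR_LEN	6	/* mirrors IXGBE_ETH_LENGTH_OF_ADDRESS */

static const uint8_t *
mc_next(const uint8_t **cursor)
{
	const uint8_t *addr = *cursor;

	*cursor += MC_ADDR_LEN;
	return (addr);
}
#endif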
1877
1878
1879 /*********************************************************************
1880  *  Timer routine
1881  *
1882  *  This routine checks for link status, updates statistics,
1883  *  and runs the watchdog check.
1884  *
1885  **********************************************************************/
1886
1887 static void
1888 ixgbe_local_timer(void *arg)
1889 {
1890         struct adapter  *adapter = arg;
1891         device_t        dev = adapter->dev;
1892         struct ix_queue *que = adapter->queues;
1893         u64             queues = 0;
1894         int             hung = 0;
1895
1896         mtx_assert(&adapter->core_mtx, MA_OWNED);
1897
1898         /* Check for pluggable optics */
1899         if (adapter->sfp_probe)
1900                 if (!ixgbe_sfp_probe(adapter))
1901                         goto out; /* Nothing to do */
1902
1903         ixgbe_update_link_status(adapter);
1904         ixgbe_update_stats_counters(adapter);
1905
1906         /*
1907         ** Check the TX queues status
1908         **      - mark hung queues so we don't schedule on them
1909         **      - watchdog only if all queues show hung
1910         */          
1911         for (int i = 0; i < adapter->num_queues; i++, que++) {
1912                 /* Keep track of queues with work for soft irq */
1913                 if (que->txr->busy)
1914                         queues |= ((u64)1 << que->me);
1915                 /*
1916                 ** Each time txeof runs without cleaning, but there
1917                 ** are still uncleaned descriptors, it increments busy.
1918                 ** If we reach the MAX we declare the queue hung.
1919                 */
1920                 if (que->busy == IXGBE_QUEUE_HUNG) {
1921                         ++hung;
1922                         /* Mark the queue as inactive */
1923                         adapter->active_queues &= ~((u64)1 << que->me);
1924                         continue;
1925                 } else {
1926                         /* Check if we've come back from hung */
1927                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1928                                 adapter->active_queues |= ((u64)1 << que->me);
1929                 }
1930                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1931                         device_printf(dev,"Warning queue %d "
1932                             "appears to be hung!\n", i);
1933                         que->txr->busy = IXGBE_QUEUE_HUNG;
1934                         ++hung;
1935                 }
1936
1937         }
1938
1939         /* Only truly watchdog if all queues show hung */
1940         if (hung == adapter->num_queues)
1941                 goto watchdog;
1942         else if (queues != 0) { /* Force an IRQ on queues with work */
1943                 ixgbe_rearm_queues(adapter, queues);
1944         }
1945
1946 out:
1947         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1948         return;
1949
1950 watchdog:
1951         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1952         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1953         adapter->watchdog_events++;
1954         ixgbe_init_locked(adapter);
1955 }
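
/*
** Illustrative sketch, not part of the driver build: the per-queue
** bookkeeping done by the timer above (busy_queue_mask() is a name made
** up for this sketch).  With four queues where queues 1 and 3 still have
** work, the mask comes out as (1<<1)|(1<<3) = 0x0a and those queues get
** a forced interrupt via ixgbe_rearm_queues(); only when every queue
** reports hung does the watchdog path reset the interface.
*/
#if 0	/* example only */
#include <stdint.h>

static uint64_t
busy_queue_mask(const int *busy, int nqueues)
{
	uint64_t mask = 0;

	for (int i = 0; i < nqueues; i++)
		if (busy[i])
			mask |= ((uint64_t)1 << i);
	return (mask);
}
#endif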
1956
1957 /*
1958 ** Note: this routine updates the OS on the link state
1959 **      the real check of the hardware only happens with
1960 **      a link interrupt.
1961 */
1962 static void
1963 ixgbe_update_link_status(struct adapter *adapter)
1964 {
1965         struct ifnet    *ifp = adapter->ifp;
1966         device_t dev = adapter->dev;
1967
1968         if (adapter->link_up){ 
1969                 if (adapter->link_active == FALSE) {
1970                         if (bootverbose)
1971                                 device_printf(dev,"Link is up %d Gbps %s \n",
1972                                     ((adapter->link_speed == 128)? 10:1),
1973                                     "Full Duplex");
1974                         adapter->link_active = TRUE;
1975                         /* Update any Flow Control changes */
1976                         ixgbe_fc_enable(&adapter->hw);
1977                         /* Update DMA coalescing config */
1978                         ixgbe_config_dmac(adapter);
1979                         if_link_state_change(ifp, LINK_STATE_UP);
1980                 }
1981         } else { /* Link down */
1982                 if (adapter->link_active == TRUE) {
1983                         if (bootverbose)
1984                                 device_printf(dev,"Link is Down\n");
1985                         if_link_state_change(ifp, LINK_STATE_DOWN);
1986                         adapter->link_active = FALSE;
1987                 }
1988         }
1989
1990         return;
1991 }
1992
1993
1994 /*********************************************************************
1995  *
1996  *  This routine disables all traffic on the adapter by issuing a
1997  *  global reset on the MAC and deallocates TX/RX buffers.
1998  *
1999  **********************************************************************/
2000
2001 static void
2002 ixgbe_stop(void *arg)
2003 {
2004         struct ifnet   *ifp;
2005         struct adapter *adapter = arg;
2006         struct ixgbe_hw *hw = &adapter->hw;
2007         ifp = adapter->ifp;
2008
2009         mtx_assert(&adapter->core_mtx, MA_OWNED);
2010
2011         INIT_DEBUGOUT("ixgbe_stop: begin\n");
2012         ixgbe_disable_intr(adapter);
2013         callout_stop(&adapter->timer);
2014
2015         /* Let the stack know...*/
2016         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2017
2018         ixgbe_reset_hw(hw);
2019         hw->adapter_stopped = FALSE;
2020         ixgbe_stop_adapter(hw);
2021         if (hw->mac.type == ixgbe_mac_82599EB)
2022                 ixgbe_stop_mac_link_on_d3_82599(hw);
2023         /* Turn off the laser - noop with no optics */
2024         ixgbe_disable_tx_laser(hw);
2025
2026         /* Update the stack */
2027         adapter->link_up = FALSE;
2028         ixgbe_update_link_status(adapter);
2029
2030         /* reprogram the RAR[0] in case user changed it. */
2031         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2032
2033         return;
2034 }
2035
2036
2037 /*********************************************************************
2038  *
2039  *  Determine hardware revision.
2040  *
2041  **********************************************************************/
2042 static void
2043 ixgbe_identify_hardware(struct adapter *adapter)
2044 {
2045         device_t        dev = adapter->dev;
2046         struct ixgbe_hw *hw = &adapter->hw;
2047
2048         /* Save off the information about this board */
2049         hw->vendor_id = pci_get_vendor(dev);
2050         hw->device_id = pci_get_device(dev);
2051         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2052         hw->subsystem_vendor_id =
2053             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2054         hw->subsystem_device_id =
2055             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2056
2057         /*
2058         ** Make sure BUSMASTER is set
2059         */
2060         pci_enable_busmaster(dev);
2061
2062         /* We need this here to set the num_segs below */
2063         ixgbe_set_mac_type(hw);
2064
2065         /* Pick up the 82599 settings */
2066         if (hw->mac.type != ixgbe_mac_82598EB) {
2067                 hw->phy.smart_speed = ixgbe_smart_speed;
2068                 adapter->num_segs = IXGBE_82599_SCATTER;
2069         } else
2070                 adapter->num_segs = IXGBE_82598_SCATTER;
2071
2072         return;
2073 }
2074
2075 /*********************************************************************
2076  *
2077  *  Determine optic type
2078  *
2079  **********************************************************************/
2080 static void
2081 ixgbe_setup_optics(struct adapter *adapter)
2082 {
2083         struct ixgbe_hw *hw = &adapter->hw;
2084         int             layer;
2085
2086         layer = ixgbe_get_supported_physical_layer(hw);
2087
2088         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2089                 adapter->optics = IFM_10G_T;
2090                 return;
2091         }
2092
2093         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2094                 adapter->optics = IFM_1000_T;
2095                 return;
2096         }
2097
2098         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2099                 adapter->optics = IFM_1000_SX;
2100                 return;
2101         }
2102
2103         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2104             IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2105                 adapter->optics = IFM_10G_LR;
2106                 return;
2107         }
2108
2109         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2110                 adapter->optics = IFM_10G_SR;
2111                 return;
2112         }
2113
2114         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2115                 adapter->optics = IFM_10G_TWINAX;
2116                 return;
2117         }
2118
2119         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2120             IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2121                 adapter->optics = IFM_10G_CX4;
2122                 return;
2123         }
2124
2125         /* If we get here just set the default */
2126         adapter->optics = IFM_ETHER | IFM_AUTO;
2127         return;
2128 }
2129
2130 /*********************************************************************
2131  *
2132  *  Setup the Legacy or MSI Interrupt handler
2133  *
2134  **********************************************************************/
2135 static int
2136 ixgbe_allocate_legacy(struct adapter *adapter)
2137 {
2138         device_t        dev = adapter->dev;
2139         struct          ix_queue *que = adapter->queues;
2140 #ifndef IXGBE_LEGACY_TX
2141         struct tx_ring          *txr = adapter->tx_rings;
2142 #endif
2143         int             error, rid = 0;
2144
2145         /* MSI RID at 1 */
2146         if (adapter->msix == 1)
2147                 rid = 1;
2148
2149         /* We allocate a single interrupt resource */
2150         adapter->res = bus_alloc_resource_any(dev,
2151             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2152         if (adapter->res == NULL) {
2153                 device_printf(dev, "Unable to allocate bus resource: "
2154                     "interrupt\n");
2155                 return (ENXIO);
2156         }
2157
2158         /*
2159          * Try allocating a fast interrupt and the associated deferred
2160          * processing contexts.
2161          */
2162 #ifndef IXGBE_LEGACY_TX
2163         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2164 #endif
2165         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2166         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2167             taskqueue_thread_enqueue, &que->tq);
2168         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2169             device_get_nameunit(adapter->dev));
2170
2171         /* Tasklets for Link, SFP and Multispeed Fiber */
2172         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2173         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2174         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2175         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2176 #ifdef IXGBE_FDIR
2177         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2178 #endif
2179         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2180             taskqueue_thread_enqueue, &adapter->tq);
2181         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2182             device_get_nameunit(adapter->dev));
2183
2184         if ((error = bus_setup_intr(dev, adapter->res,
2185             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2186             que, &adapter->tag)) != 0) {
2187                 device_printf(dev, "Failed to register fast interrupt "
2188                     "handler: %d\n", error);
2189                 taskqueue_free(que->tq);
2190                 taskqueue_free(adapter->tq);
2191                 que->tq = NULL;
2192                 adapter->tq = NULL;
2193                 return (error);
2194         }
2195         /* For simplicity in the handlers */
2196         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2197
2198         return (0);
2199 }
2200
2201
2202 /*********************************************************************
2203  *
2204  *  Setup MSIX Interrupt resources and handlers 
2205  *
2206  **********************************************************************/
2207 static int
2208 ixgbe_allocate_msix(struct adapter *adapter)
2209 {
2210         device_t        dev = adapter->dev;
2211         struct          ix_queue *que = adapter->queues;
2212         struct          tx_ring *txr = adapter->tx_rings;
2213         int             error, rid, vector = 0;
2214         int             cpu_id = 0;
2215
2216         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2217                 rid = vector + 1;
2218                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2219                     RF_SHAREABLE | RF_ACTIVE);
2220                 if (que->res == NULL) {
2221                         device_printf(dev,"Unable to allocate"
2222                             " bus resource: que interrupt [%d]\n", vector);
2223                         return (ENXIO);
2224                 }
2225                 /* Set the handler function */
2226                 error = bus_setup_intr(dev, que->res,
2227                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2228                     ixgbe_msix_que, que, &que->tag);
2229                 if (error) {
2230                         que->res = NULL;
2231                         device_printf(dev, "Failed to register QUE handler");
2232                         return (error);
2233                 }
2234 #if __FreeBSD_version >= 800504
2235                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2236 #endif
2237                 que->msix = vector;
2238                 adapter->active_queues |= (u64)(1 << que->msix);
2239                 /*
2240                  * Bind the msix vector, and thus the
2241                  * rings to the corresponding cpu.
2242                  *
2243                  * This just happens to match the default RSS round-robin
2244                  * bucket -> queue -> CPU allocation.
2245                  */
2246                 if (adapter->num_queues > 1)
2247                         cpu_id = i;
2248
2249                 if (adapter->num_queues > 1)
2250                         bus_bind_intr(dev, que->res, cpu_id);
2251
2252 #ifndef IXGBE_LEGACY_TX
2253                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2254 #endif
2255                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2256                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2257                     taskqueue_thread_enqueue, &que->tq);
2258                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2259                     device_get_nameunit(adapter->dev));
2260         }
2261
2262         /* and Link */
2263         rid = vector + 1;
2264         adapter->res = bus_alloc_resource_any(dev,
2265             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2266         if (!adapter->res) {
2267                 device_printf(dev,"Unable to allocate"
2268             " bus resource: Link interrupt [%d]\n", rid);
2269                 return (ENXIO);
2270         }
2271         /* Set the link handler function */
2272         error = bus_setup_intr(dev, adapter->res,
2273             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2274             ixgbe_msix_link, adapter, &adapter->tag);
2275         if (error) {
2276                 adapter->res = NULL;
2277                 device_printf(dev, "Failed to register LINK handler");
2278                 return (error);
2279         }
2280 #if __FreeBSD_version >= 800504
2281         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2282 #endif
2283         adapter->vector = vector;
2284         /* Tasklets for Link, SFP and Multispeed Fiber */
2285         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2286         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2287         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2288         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2289 #ifdef IXGBE_FDIR
2290         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2291 #endif
2292         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2293             taskqueue_thread_enqueue, &adapter->tq);
2294         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2295             device_get_nameunit(adapter->dev));
2296
2297         return (0);
2298 }
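
/*
** Illustration, not driver code: with the layout above, a 4-queue
** adapter uses MSIX vectors 0..3 (bus rids 1..4) for the RX/TX queue
** pairs, each bound to CPU 0..3 when more than one queue is configured,
** and the last vector (4, rid 5) is registered for link/admin events.
*/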
2299
2300 /*
2301  * Setup Either MSI/X or MSI
2302  */
2303 static int
2304 ixgbe_setup_msix(struct adapter *adapter)
2305 {
2306         device_t dev = adapter->dev;
2307         int rid, want, queues, msgs;
2308
2309         /* Override by tuneable */
2310         if (ixgbe_enable_msix == 0)
2311                 goto msi;
2312
2313         /* First try MSI/X */
2314         msgs = pci_msix_count(dev); 
2315         if (msgs == 0)
2316                 goto msi;
2317         rid = PCIR_BAR(MSIX_82598_BAR);
2318         adapter->msix_mem = bus_alloc_resource_any(dev,
2319             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2320         if (adapter->msix_mem == NULL) {
2321                 rid += 4;       /* 82599 maps in higher BAR */
2322                 adapter->msix_mem = bus_alloc_resource_any(dev,
2323                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
2324         }
2325         if (adapter->msix_mem == NULL) {
2326                 /* May not be enabled */
2327                 device_printf(adapter->dev,
2328                     "Unable to map MSIX table \n");
2329                 goto msi;
2330         }
2331
2332         /* Figure out a reasonable auto config value */
2333         queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2334
2335         if (ixgbe_num_queues != 0)
2336                 queues = ixgbe_num_queues;
2337         /* Set max queues to 8 when autoconfiguring */
2338         else if ((ixgbe_num_queues == 0) && (queues > 8))
2339                 queues = 8;
2340
2341         /* reflect correct sysctl value */
2342         ixgbe_num_queues = queues;
2343
2344         /*
2345         ** Want one vector (RX/TX pair) per queue
2346         ** plus an additional for Link.
2347         */
2348         want = queues + 1;
2349         if (msgs >= want)
2350                 msgs = want;
2351         else {
2352                 device_printf(adapter->dev,
2353                     "MSIX Configuration Problem, "
2354                     "%d vectors but %d queues wanted!\n",
2355                     msgs, want);
2356                 goto msi;
2357         }
2358         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2359                 device_printf(adapter->dev,
2360                     "Using MSIX interrupts with %d vectors\n", msgs);
2361                 adapter->num_queues = queues;
2362                 return (msgs);
2363         }
2364         /*
2365         ** If MSIX alloc failed or provided us with
2366         ** less than needed, free and fall through to MSI
2367         */
2368         pci_release_msi(dev);
2369
2370 msi:
2371         if (adapter->msix_mem != NULL) {
2372                 bus_release_resource(dev, SYS_RES_MEMORY,
2373                     rid, adapter->msix_mem);
2374                 adapter->msix_mem = NULL;
2375         }
2376         msgs = 1;
2377         if (pci_alloc_msi(dev, &msgs) == 0) {
2378                 device_printf(adapter->dev,"Using an MSI interrupt\n");
2379                 return (msgs);
2380         }
2381         device_printf(adapter->dev,"Using a Legacy interrupt\n");
2382         return (0);
2383 }
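
/*
** Illustrative sketch, not part of the driver build: the vector sizing
** logic above (msix_vectors_wanted() is a name made up for this sketch).
** One vector per RX/TX queue pair plus one for link; e.g. 16 CPUs and
** 18 available MSIX messages autoconfigure to 8 queues, so 9 vectors
** are requested.
*/
#if 0	/* example only */
static int
msix_vectors_wanted(int ncpus, int msgs, int tuned_queues)
{
	int queues = (ncpus > (msgs - 1)) ? (msgs - 1) : ncpus;

	if (tuned_queues != 0)
		queues = tuned_queues;	/* explicit tuneable wins */
	else if (queues > 8)
		queues = 8;		/* autoconfig cap */
	return (queues + 1);		/* +1 for the link vector */
}
#endif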
2384
2385
2386 static int
2387 ixgbe_allocate_pci_resources(struct adapter *adapter)
2388 {
2389         int             rid;
2390         device_t        dev = adapter->dev;
2391
2392         rid = PCIR_BAR(0);
2393         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2394             &rid, RF_ACTIVE);
2395
2396         if (!(adapter->pci_mem)) {
2397                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2398                 return (ENXIO);
2399         }
2400
2401         adapter->osdep.mem_bus_space_tag =
2402                 rman_get_bustag(adapter->pci_mem);
2403         adapter->osdep.mem_bus_space_handle =
2404                 rman_get_bushandle(adapter->pci_mem);
2405         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2406
2407         /* Legacy defaults */
2408         adapter->num_queues = 1;
2409         adapter->hw.back = &adapter->osdep;
2410
2411         /*
2412         ** Now set up MSI or MSI/X; this should
2413         ** return the number of supported
2414         ** vectors. (Will be 1 for MSI)
2415         */
2416         adapter->msix = ixgbe_setup_msix(adapter);
2417         return (0);
2418 }
2419
2420 static void
2421 ixgbe_free_pci_resources(struct adapter * adapter)
2422 {
2423         struct          ix_queue *que = adapter->queues;
2424         device_t        dev = adapter->dev;
2425         int             rid, memrid;
2426
2427         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2428                 memrid = PCIR_BAR(MSIX_82598_BAR);
2429         else
2430                 memrid = PCIR_BAR(MSIX_82599_BAR);
2431
2432         /*
2433         ** There is a slight possibility of a failure mode
2434         ** in attach that will result in entering this function
2435         ** before interrupt resources have been initialized, and
2436         ** in that case we do not want to execute the loops below.
2437         ** We can detect this reliably by the state of the adapter's
2438         ** res pointer.
2439         */
2440         if (adapter->res == NULL)
2441                 goto mem;
2442
2443         /*
2444         **  Release all msix queue resources:
2445         */
2446         for (int i = 0; i < adapter->num_queues; i++, que++) {
2447                 rid = que->msix + 1;
2448                 if (que->tag != NULL) {
2449                         bus_teardown_intr(dev, que->res, que->tag);
2450                         que->tag = NULL;
2451                 }
2452                 if (que->res != NULL)
2453                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2454         }
2455
2456
2457         /* Clean the Legacy or Link interrupt last */
2458         if (adapter->vector) /* we are doing MSIX */
2459                 rid = adapter->vector + 1;
2460         else
2461                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2462
2463         if (adapter->tag != NULL) {
2464                 bus_teardown_intr(dev, adapter->res, adapter->tag);
2465                 adapter->tag = NULL;
2466         }
2467         if (adapter->res != NULL)
2468                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2469
2470 mem:
2471         if (adapter->msix)
2472                 pci_release_msi(dev);
2473
2474         if (adapter->msix_mem != NULL)
2475                 bus_release_resource(dev, SYS_RES_MEMORY,
2476                     memrid, adapter->msix_mem);
2477
2478         if (adapter->pci_mem != NULL)
2479                 bus_release_resource(dev, SYS_RES_MEMORY,
2480                     PCIR_BAR(0), adapter->pci_mem);
2481
2482         return;
2483 }
2484
2485 /*********************************************************************
2486  *
2487  *  Setup networking device structure and register an interface.
2488  *
2489  **********************************************************************/
2490 static int
2491 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2492 {
2493         struct ifnet   *ifp;
2494
2495         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2496
2497         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2498         if (ifp == NULL) {
2499                 device_printf(dev, "can not allocate ifnet structure\n");
2500                 return (-1);
2501         }
2502         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2503         if_initbaudrate(ifp, IF_Gbps(10));
2504         ifp->if_init = ixgbe_init;
2505         ifp->if_softc = adapter;
2506         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2507         ifp->if_ioctl = ixgbe_ioctl;
2508         /* TSO parameters */
2509         ifp->if_hw_tsomax = 65518;
2510         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2511         ifp->if_hw_tsomaxsegsize = 2048;
2512 #ifndef IXGBE_LEGACY_TX
2513         ifp->if_transmit = ixgbe_mq_start;
2514         ifp->if_qflush = ixgbe_qflush;
2515 #else
2516         ifp->if_start = ixgbe_start;
2517         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2518         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2519         IFQ_SET_READY(&ifp->if_snd);
2520 #endif
2521
2522         ether_ifattach(ifp, adapter->hw.mac.addr);
2523
2524         adapter->max_frame_size =
2525             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2526
2527         /*
2528          * Tell the upper layer(s) we support long frames.
2529          */
2530         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2531
2532         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2533         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2534         ifp->if_capabilities |= IFCAP_LRO;
2535         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2536                              |  IFCAP_VLAN_HWTSO
2537                              |  IFCAP_VLAN_MTU
2538                              |  IFCAP_HWSTATS;
2539         ifp->if_capenable = ifp->if_capabilities;
2540
2541         /*
2542         ** Don't turn this on by default: if vlans are
2543         ** created on another pseudo device (e.g. lagg),
2544         ** vlan events are not passed thru and operation
2545         ** breaks, but with HW FILTER off it works. If
2546         ** using vlans directly on the ixgbe driver you can
2547         ** enable this and get full hardware tag filtering.
2548         */
2549         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2550
2551         /*
2552          * Specify the media types supported by this adapter and register
2553          * callbacks to update media and link information
2554          */
2555         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2556                     ixgbe_media_status);
2557
2558         ixgbe_add_media_types(adapter);
2559
2560         /* Autoselect media by default */
2561         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2562
2563         return (0);
2564 }
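
/*
** Illustration, not driver code: with the default MTU of 1500,
** max_frame_size above works out to 1500 + 14 (Ethernet header) + 4
** (CRC) = 1518 bytes; raising the MTU for jumbo frames grows it
** accordingly.
*/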
2565
2566 static void
2567 ixgbe_add_media_types(struct adapter *adapter)
2568 {
2569         struct ixgbe_hw *hw = &adapter->hw;
2570         device_t dev = adapter->dev;
2571         int layer;
2572
2573         layer = ixgbe_get_supported_physical_layer(hw);
2574
2575         /* Media types with matching FreeBSD media defines */
2576         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2577                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2578         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2579                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2580         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2581                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2582         
2583         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2584             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2585                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2586
2587         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2588                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2589         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2590                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2591         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2592                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2593         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2594                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2595
2596         /*
2597         ** Other (no matching FreeBSD media type):
2598         ** To work around this, we'll assign these completely
2599         ** inappropriate media types.
2600         */
2601         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2602                 device_printf(dev, "Media supported: 10GbaseKR\n");
2603                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2604                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2605         }
2606         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2607                 device_printf(dev, "Media supported: 10GbaseKX4\n");
2608                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2609                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2610         }
2611         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2612                 device_printf(dev, "Media supported: 1000baseKX\n");
2613                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2614                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2615         }
2616         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2617                 /* Someday, someone will care about you... */
2618                 device_printf(dev, "Media supported: 1000baseBX\n");
2619         }
2620         
2621         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2622                 ifmedia_add(&adapter->media,
2623                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2624                 ifmedia_add(&adapter->media,
2625                     IFM_ETHER | IFM_1000_T, 0, NULL);
2626         }
2627
2628         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2629 }
2630
2631 static void
2632 ixgbe_config_link(struct adapter *adapter)
2633 {
2634         struct ixgbe_hw *hw = &adapter->hw;
2635         u32     autoneg, err = 0;
2636         bool    sfp, negotiate;
2637
2638         sfp = ixgbe_is_sfp(hw);
2639
2640         if (sfp) { 
2641                 if (hw->phy.multispeed_fiber) {
2642                         hw->mac.ops.setup_sfp(hw);
2643                         ixgbe_enable_tx_laser(hw);
2644                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2645                 } else
2646                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2647         } else {
2648                 if (hw->mac.ops.check_link)
2649                         err = ixgbe_check_link(hw, &adapter->link_speed,
2650                             &adapter->link_up, FALSE);
2651                 if (err)
2652                         goto out;
2653                 autoneg = hw->phy.autoneg_advertised;
2654                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2655                         err  = hw->mac.ops.get_link_capabilities(hw,
2656                             &autoneg, &negotiate);
2657                 if (err)
2658                         goto out;
2659                 if (hw->mac.ops.setup_link)
2660                         err = hw->mac.ops.setup_link(hw,
2661                             autoneg, adapter->link_up);
2662         }
2663 out:
2664         return;
2665 }
2666
2667
2668 /*********************************************************************
2669  *
2670  *  Enable transmit units.
2671  *
2672  **********************************************************************/
2673 static void
2674 ixgbe_initialize_transmit_units(struct adapter *adapter)
2675 {
2676         struct tx_ring  *txr = adapter->tx_rings;
2677         struct ixgbe_hw *hw = &adapter->hw;
2678
2679         /* Setup the Base and Length of the Tx Descriptor Ring */
2680
2681         for (int i = 0; i < adapter->num_queues; i++, txr++) {
2682                 u64     tdba = txr->txdma.dma_paddr;
2683                 u32     txctrl = 0;
2684
2685                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2686                        (tdba & 0x00000000ffffffffULL));
2687                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2688                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2689                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2690
2691                 /* Setup the HW Tx Head and Tail descriptor pointers */
2692                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2693                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2694
2695                 /* Cache the tail address */
2696                 txr->tail = IXGBE_TDT(txr->me);
2697
2698                 /* Set the processing limit */
2699                 txr->process_limit = ixgbe_tx_process_limit;
2700
2701                 /* Disable Head Writeback */
2702                 switch (hw->mac.type) {
2703                 case ixgbe_mac_82598EB:
2704                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2705                         break;
2706                 case ixgbe_mac_82599EB:
2707                 case ixgbe_mac_X540:
2708                 default:
2709                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2710                         break;
2711                 }
2712                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2713                 switch (hw->mac.type) {
2714                 case ixgbe_mac_82598EB:
2715                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2716                         break;
2717                 case ixgbe_mac_82599EB:
2718                 case ixgbe_mac_X540:
2719                 default:
2720                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2721                         break;
2722                 }
2723
2724         }
2725
2726         if (hw->mac.type != ixgbe_mac_82598EB) {
2727                 u32 dmatxctl, rttdcs;
2728                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2729                 dmatxctl |= IXGBE_DMATXCTL_TE;
2730                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2731                 /* Disable arbiter to set MTQC */
2732                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2733                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2734                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2735                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2736                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2737                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2738         }
2739
2740         return;
2741 }
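
/*
** Illustrative sketch, not part of the driver build: the TDBAL/TDBAH
** split used above (split_ring_base() is a name made up for this
** sketch).  A 64-bit descriptor ring base of 0x123456000 is programmed
** as TDBAL = 0x23456000 and TDBAH = 0x1.
*/
#if 0	/* example only */
#include <stdint.h>

static void
split_ring_base(uint64_t paddr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(paddr & 0x00000000ffffffffULL);
	*hi = (uint32_t)(paddr >> 32);
}
#endif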
2742
2743 static void
2744 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2745 {
2746         struct ixgbe_hw *hw = &adapter->hw;
2747         uint32_t reta;
2748         int i, j, queue_id, table_size;
2749         int index_mult;
2750         uint32_t rss_key[10];
2751         uint32_t mrqc;
2752
2753         /* Setup RSS */
2754         reta = 0;
2755
2756         /* set up random bits */
2757         arc4rand(&rss_key, sizeof(rss_key), 0);
2758
2759         /* Set multiplier for RETA setup and table size based on MAC */
2760         index_mult = 0x1;
2761         table_size = 128;
2762         switch (adapter->hw.mac.type) {
2763         case ixgbe_mac_82598EB:
2764                 index_mult = 0x11;
2765                 break;
2766         case ixgbe_mac_X550:
2767         case ixgbe_mac_X550EM_x:
2768                 table_size = 512;
2769                 break;
2770         default:
2771                 break;
2772         }
2773
2774         /* Set up the redirection table */
2775         for (i = 0, j = 0; i < table_size; i++, j++) {
2776                 if (j == adapter->num_queues) j = 0;
2777                 queue_id = (j * index_mult);
2778                 /*
2779                  * The low 8 bits are for hash value (n+0);
2780                  * The next 8 bits are for hash value (n+1), etc.
2781                  */
2782                 reta = reta >> 8;
2783                 reta = reta | ( ((uint32_t) queue_id) << 24);
2784                 if ((i & 3) == 3) {
2785                         if (i < 128)
2786                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2787                         else
2788                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2789                         reta = 0;
2790                 }
2791         }
2792
2793         /* Now fill our hash function seeds */
2794         for (int i = 0; i < 10; i++)
2795                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2796
2797         /* Perform hash on these packet types */
2798         /*
2799          * Disable UDP - IP fragments aren't currently being handled
2800          * and so we end up with a mix of 2-tuple and 4-tuple
2801          * traffic.
2802          */
2803         mrqc = IXGBE_MRQC_RSSEN
2804              | IXGBE_MRQC_RSS_FIELD_IPV4
2805              | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2806 #if 0
2807              | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2808 #endif
2809              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2810              | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2811              | IXGBE_MRQC_RSS_FIELD_IPV6
2812              | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2813 #if 0
2814              | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2815              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2816 #endif
2817         ;
2818         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2819 }
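
/*
** Illustrative sketch, not part of the driver build: the RETA packing
** done above (pack_reta_word() is a name made up for this sketch).
** Each 32-bit RETA register holds four 8-bit queue indices, lowest
** byte first: with 4 queues and index_mult = 1, entries 0..3 pack to
** 0x03020100; on 82598 (index_mult = 0x11) they pack to 0x33221100.
*/
#if 0	/* example only */
#include <stdint.h>

static uint32_t
pack_reta_word(const uint8_t queue_id[4])
{
	uint32_t reta = 0;

	for (int n = 0; n < 4; n++) {
		reta >>= 8;
		reta |= ((uint32_t)queue_id[n]) << 24;
	}
	return (reta);
}
#endif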
2820
2821
2822 /*********************************************************************
2823  *
2824  *  Setup receive registers and features.
2825  *
2826  **********************************************************************/
2827 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2828
2829 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2830         
2831 static void
2832 ixgbe_initialize_receive_units(struct adapter *adapter)
2833 {
2834         struct  rx_ring *rxr = adapter->rx_rings;
2835         struct ixgbe_hw *hw = &adapter->hw;
2836         struct ifnet   *ifp = adapter->ifp;
2837         u32             bufsz, fctrl, srrctl, rxcsum;
2838         u32             hlreg;
2839
2840
2841         /*
2842          * Make sure receives are disabled while
2843          * setting up the descriptor ring
2844          */
2845         ixgbe_disable_rx(hw);
2846
2847         /* Enable broadcasts */
2848         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2849         fctrl |= IXGBE_FCTRL_BAM;
2850         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2851                 fctrl |= IXGBE_FCTRL_DPF;
2852                 fctrl |= IXGBE_FCTRL_PMCF;
2853         }
2854         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2855
2856         /* Set for Jumbo Frames? */
2857         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2858         if (ifp->if_mtu > ETHERMTU)
2859                 hlreg |= IXGBE_HLREG0_JUMBOEN;
2860         else
2861                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2862 #ifdef DEV_NETMAP
2863         /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2864         if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2865                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2866         else
2867                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2868 #endif /* DEV_NETMAP */
2869         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2870
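             /*
              * Express the receive buffer size in SRRCTL BSIZEPKT units:
              * round rx_mbuf_sz up to the next (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT)
              * byte boundary, then shift it down into the register field.
              */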
2871         bufsz = (adapter->rx_mbuf_sz +
2872             BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2873
2874         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2875                 u64 rdba = rxr->rxdma.dma_paddr;
2876
2877                 /* Setup the Base and Length of the Rx Descriptor Ring */
2878                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2879                                (rdba & 0x00000000ffffffffULL));
2880                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2881                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2882                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2883
2884                 /* Set up the SRRCTL register */
2885                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2886                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2887                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2888                 srrctl |= bufsz;
2889                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2890
2891                 /*
2892                  * Set DROP_EN iff we have no flow control and >1 queue.
2893                  * Note that srrctl was cleared shortly before during reset,
2894                  * so we do not need to clear the bit, but do it just in case
2895                  * this code is moved elsewhere.
2896                  */
2897                 if (adapter->num_queues > 1 &&
2898                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2899                         srrctl |= IXGBE_SRRCTL_DROP_EN;
2900                 } else {
2901                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2902                 }
2903
2904                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2905
2906                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2907                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2908                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2909
2910                 /* Set the processing limit */
2911                 rxr->process_limit = ixgbe_rx_process_limit;
2912
2913                 /* Set the driver rx tail address */
2914                 rxr->tail =  IXGBE_RDT(rxr->me);
2915         }
2916
2917         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2918                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2919                               IXGBE_PSRTYPE_UDPHDR |
2920                               IXGBE_PSRTYPE_IPV4HDR |
2921                               IXGBE_PSRTYPE_IPV6HDR;
2922                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2923         }
2924
2925         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2926
2927         ixgbe_initialise_rss_mapping(adapter);
2928
2929         if (adapter->num_queues > 1) {
2930                 /* RSS and RX IPP Checksum are mutually exclusive */
2931                 rxcsum |= IXGBE_RXCSUM_PCSD;
2932         }
2933
2934         if (ifp->if_capenable & IFCAP_RXCSUM)
2935                 rxcsum |= IXGBE_RXCSUM_PCSD;
2936
2937         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2938                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2939
2940         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2941
2942         return;
2943 }
2944
2945
2946 /*
2947 ** This routine is run via a vlan config EVENT;
2948 ** it enables us to use the HW Filter table since
2949 ** we can get the vlan id. This just creates the
2950 ** entry in the soft version of the VFTA; init will
2951 ** repopulate the real table.
2952 */
2953 static void
2954 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2955 {
2956         struct adapter  *adapter = ifp->if_softc;
2957         u16             index, bit;
2958
2959         if (ifp->if_softc !=  arg)   /* Not our event */
2960                 return;
2961
2962         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2963                 return;
2964
2965         IXGBE_CORE_LOCK(adapter);
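             /*
              * The soft VFTA mirrors the hardware table: an array of
              * 32-bit words covering all 4096 VLAN IDs, where vtag >> 5
              * selects the word and the low 5 bits select the bit in it.
              */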
2966         index = (vtag >> 5) & 0x7F;
2967         bit = vtag & 0x1F;
2968         adapter->shadow_vfta[index] |= (1 << bit);
2969         ++adapter->num_vlans;
2970         ixgbe_setup_vlan_hw_support(adapter);
2971         IXGBE_CORE_UNLOCK(adapter);
2972 }
2973
2974 /*
2975 ** This routine is run via a vlan
2976 ** unconfig EVENT; it removes our entry
2977 ** from the soft vfta.
2978 */
2979 static void
2980 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2981 {
2982         struct adapter  *adapter = ifp->if_softc;
2983         u16             index, bit;
2984
2985         if (ifp->if_softc !=  arg)
2986                 return;
2987
2988         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2989                 return;
2990
2991         IXGBE_CORE_LOCK(adapter);
2992         index = (vtag >> 5) & 0x7F;
2993         bit = vtag & 0x1F;
2994         adapter->shadow_vfta[index] &= ~(1 << bit);
2995         --adapter->num_vlans;
2996         /* Re-init to load the changes */
2997         ixgbe_setup_vlan_hw_support(adapter);
2998         IXGBE_CORE_UNLOCK(adapter);
2999 }
3000
3001 static void
3002 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
3003 {
3004         struct ifnet    *ifp = adapter->ifp;
3005         struct ixgbe_hw *hw = &adapter->hw;
3006         struct rx_ring  *rxr;
3007         u32             ctrl;
3008
3009
3010         /*
3011         ** We get here through init_locked, meaning
3012         ** a soft reset; this has already cleared
3013         ** the VFTA and other state, so if no
3014         ** vlans have been registered, do nothing.
3015         */
3016         if (adapter->num_vlans == 0)
3017                 return;
3018
3019         /* Setup the queues for vlans */
3020         for (int i = 0; i < adapter->num_queues; i++) {
3021                 rxr = &adapter->rx_rings[i];
3022                 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3023                 if (hw->mac.type != ixgbe_mac_82598EB) {
3024                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3025                         ctrl |= IXGBE_RXDCTL_VME;
3026                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3027                 }
3028                 rxr->vtag_strip = TRUE;
3029         }
3030
3031         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3032                 return;
3033         /*
3034         ** A soft reset zeroes out the VFTA, so
3035         ** we need to repopulate it now.
3036         */
3037         for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3038                 if (adapter->shadow_vfta[i] != 0)
3039                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3040                             adapter->shadow_vfta[i]);
3041
3042         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3043         /* Enable the Filter Table if enabled */
3044         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3045                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3046                 ctrl |= IXGBE_VLNCTRL_VFE;
3047         }
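             /*
              * On 82598 VLAN tag stripping is a global VLNCTRL bit rather
              * than the per-queue RXDCTL setting used above.
              */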
3048         if (hw->mac.type == ixgbe_mac_82598EB)
3049                 ctrl |= IXGBE_VLNCTRL_VME;
3050         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3051 }
3052
3053 static void
3054 ixgbe_enable_intr(struct adapter *adapter)
3055 {
3056         struct ixgbe_hw *hw = &adapter->hw;
3057         struct ix_queue *que = adapter->queues;
3058         u32             mask, fwsm;
3059
3060         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3061         /* Enable Fan Failure detection */
3062         if (hw->device_id == IXGBE_DEV_ID_82598AT)
3063                     mask |= IXGBE_EIMS_GPI_SDP1;
3064
3065         switch (adapter->hw.mac.type) {
3066                 case ixgbe_mac_82599EB:
3067                         mask |= IXGBE_EIMS_ECC;
3068                         /* Temperature sensor on some adapters */
3069                         mask |= IXGBE_EIMS_GPI_SDP0;
3070                         /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3071                         mask |= IXGBE_EIMS_GPI_SDP1;
3072                         mask |= IXGBE_EIMS_GPI_SDP2;
3073 #ifdef IXGBE_FDIR
3074                         mask |= IXGBE_EIMS_FLOW_DIR;
3075 #endif
3076                         break;
3077                 case ixgbe_mac_X540:
3078                         /* Detect if Thermal Sensor is enabled */
3079                         fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3080                         if (fwsm & IXGBE_FWSM_TS_ENABLED)
3081                                 mask |= IXGBE_EIMS_TS;
3082                         mask |= IXGBE_EIMS_ECC;
3083 #ifdef IXGBE_FDIR
3084                         mask |= IXGBE_EIMS_FLOW_DIR;
3085 #endif
3086                         break;
3087                 case ixgbe_mac_X550:
3088                 case ixgbe_mac_X550EM_x:
3089                         /* MAC thermal sensor is automatically enabled */
3090                         mask |= IXGBE_EIMS_TS;
3091                         /* Some devices use SDP0 for important information */
3092                         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3093                             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3094                                 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3095                         mask |= IXGBE_EIMS_ECC;
3096 #ifdef IXGBE_FDIR
3097                         mask |= IXGBE_EIMS_FLOW_DIR;
3098 #endif
3099                 /* falls through */
3100                 default:
3101                         break;
3102         }
3103
3104         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3105
3106         /* With MSI-X we use auto clear */
3107         if (adapter->msix_mem) {
3108                 mask = IXGBE_EIMS_ENABLE_MASK;
3109                 /* Don't autoclear Link */
3110                 mask &= ~IXGBE_EIMS_OTHER;
3111                 mask &= ~IXGBE_EIMS_LSC;
3112                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3113         }
3114
3115         /*
3116         ** Now enable all queues; this is done separately to
3117         ** allow for handling the extended (beyond 32) MSIX
3118         ** vectors that can be used by the 82599.
3119         */
3120         for (int i = 0; i < adapter->num_queues; i++, que++)
3121                 ixgbe_enable_queue(adapter, que->msix);
3122
3123         IXGBE_WRITE_FLUSH(hw);
3124
3125         return;
3126 }
3127
3128 static void
3129 ixgbe_disable_intr(struct adapter *adapter)
3130 {
3131         if (adapter->msix_mem)
3132                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
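             /*
              * 82598 can mask everything through EIMC alone; on newer MACs
              * the upper EIMC bits cover the non-queue causes while the
              * per-queue vectors are masked via the extended EIMC_EX
              * registers, so clear both.
              */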
3133         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3134                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3135         } else {
3136                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3137                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3138                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3139         }
3140         IXGBE_WRITE_FLUSH(&adapter->hw);
3141         return;
3142 }
3143
3144 /*
3145 ** Get the width and transaction speed of
3146 ** the slot this adapter is plugged into.
3147 */
3148 static void
3149 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3150 {
3151         device_t                dev = ((struct ixgbe_osdep *)hw->back)->dev;
3152         struct ixgbe_mac_info   *mac = &hw->mac;
3153         u16                     link;
3154         u32                     offset;
3155
3156         /* For most devices simply call the shared code routine */
3157         if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3158                 ixgbe_get_bus_info(hw);
3159                 /* These devices don't use PCI-E */
3160                 switch (hw->mac.type) {
3161                 case ixgbe_mac_X550EM_x:
3162                         return;
3163                 default:
3164                         goto display;
3165                 }
3166         }
3167
3168         /*
3169         ** For the Quad port adapter we need to parse back
3170         ** up the PCI tree to find the speed of the expansion
3171         ** slot into which this adapter is plugged. A bit more work.
3172         */
3173         dev = device_get_parent(device_get_parent(dev));
3174 #ifdef IXGBE_DEBUG
3175         device_printf(dev, "parent pcib = %x,%x,%x\n",
3176             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3177 #endif
3178         dev = device_get_parent(device_get_parent(dev));
3179 #ifdef IXGBE_DEBUG
3180         device_printf(dev, "slot pcib = %x,%x,%x\n",
3181             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3182 #endif
3183         /* Now get the PCI Express Capabilities offset */
3184         pci_find_cap(dev, PCIY_EXPRESS, &offset);
3185         /* ...and read the Link Status Register */
3186         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3187         switch (link & IXGBE_PCI_LINK_WIDTH) {
3188         case IXGBE_PCI_LINK_WIDTH_1:
3189                 hw->bus.width = ixgbe_bus_width_pcie_x1;
3190                 break;
3191         case IXGBE_PCI_LINK_WIDTH_2:
3192                 hw->bus.width = ixgbe_bus_width_pcie_x2;
3193                 break;
3194         case IXGBE_PCI_LINK_WIDTH_4:
3195                 hw->bus.width = ixgbe_bus_width_pcie_x4;
3196                 break;
3197         case IXGBE_PCI_LINK_WIDTH_8:
3198                 hw->bus.width = ixgbe_bus_width_pcie_x8;
3199                 break;
3200         default:
3201                 hw->bus.width = ixgbe_bus_width_unknown;
3202                 break;
3203         }
3204
3205         switch (link & IXGBE_PCI_LINK_SPEED) {
3206         case IXGBE_PCI_LINK_SPEED_2500:
3207                 hw->bus.speed = ixgbe_bus_speed_2500;
3208                 break;
3209         case IXGBE_PCI_LINK_SPEED_5000:
3210                 hw->bus.speed = ixgbe_bus_speed_5000;
3211                 break;
3212         case IXGBE_PCI_LINK_SPEED_8000:
3213                 hw->bus.speed = ixgbe_bus_speed_8000;
3214                 break;
3215         default:
3216                 hw->bus.speed = ixgbe_bus_speed_unknown;
3217                 break;
3218         }
3219
3220         mac->ops.set_lan_id(hw);
3221
3222 display:
3223         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3224             ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3225             (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3226             (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3227             (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3228             (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3229             (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3230             ("Unknown"));
3231
3232         if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3233             ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3234             (hw->bus.speed == ixgbe_bus_speed_2500))) {
3235                 device_printf(dev, "PCI-Express bandwidth available"
3236                     " for this card\n     is not sufficient for"
3237                     " optimal performance.\n");
3238                 device_printf(dev, "For optimal performance a x8 "
3239                     "PCIE, or x4 PCIE Gen2 slot is required.\n");
3240         }
3241         if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3242             ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3243             (hw->bus.speed < ixgbe_bus_speed_8000))) {
3244                 device_printf(dev, "PCI-Express bandwidth available"
3245                     " for this card\n     is not sufficient for"
3246                     " optimal performance.\n");
3247                 device_printf(dev, "For optimal performance a x8 "
3248                     "PCIE Gen3 slot is required.\n");
3249         }
3250
3251         return;
3252 }
3253
3254
3255 /*
3256 ** Set up the correct IVAR register for a particular MSIX interrupt
3257 **   (yes this is all very magic and confusing :)
3258 **  - entry is the register array entry
3259 **  - vector is the MSIX vector for this queue
3260 **  - type is RX (0), TX (1) or MISC (-1)
3261 */
3262 static void
3263 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3264 {
3265         struct ixgbe_hw *hw = &adapter->hw;
3266         u32 ivar, index;
3267
3268         vector |= IXGBE_IVAR_ALLOC_VAL;
3269
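             /*
              * Register layout used below: the 82598 packs four 8-bit
              * vector entries per IVAR register, with RX and TX causes in
              * separate 64-entry blocks (hence entry += type * 64).  On
              * 82599 and later each IVAR register covers two queues;
              * (entry & 1) selects the low or high 16-bit half, and within
              * that half the RX vector sits in bits 0-7 and the TX vector
              * in bits 8-15.
              */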
3270         switch (hw->mac.type) {
3271
3272         case ixgbe_mac_82598EB:
3273                 if (type == -1)
3274                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3275                 else
3276                         entry += (type * 64);
3277                 index = (entry >> 2) & 0x1F;
3278                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3279                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3280                 ivar |= (vector << (8 * (entry & 0x3)));
3281                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3282                 break;
3283
3284         case ixgbe_mac_82599EB:
3285         case ixgbe_mac_X540:
3286         case ixgbe_mac_X550:
3287         case ixgbe_mac_X550EM_x:
3288                 if (type == -1) { /* MISC IVAR */
3289                         index = (entry & 1) * 8;
3290                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3291                         ivar &= ~(0xFF << index);
3292                         ivar |= (vector << index);
3293                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3294                 } else {        /* RX/TX IVARS */
3295                         index = (16 * (entry & 1)) + (8 * type);
3296                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3297                         ivar &= ~(0xFF << index);
3298                         ivar |= (vector << index);
3299                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3300                 }
3301                 break;
3302         default:
3303                 break;
3304         }
3305 }
3306
3307 static void
3308 ixgbe_configure_ivars(struct adapter *adapter)
3309 {
3310         struct  ix_queue *que = adapter->queues;
3311         u32 newitr;
3312
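             /*
              * Translate interrupts/sec into the EITR interval field:
              * 4000000 / rate, masked to the 0x0FF8 field.  The
              * interrupt_rate sysctl handler reverses this as
              * usec = (reg & 0x0FF8) >> 3 and rate = 500000 / usec.
              */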
3313         if (ixgbe_max_interrupt_rate > 0)
3314                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3315         else {
3316                 /*
3317                 ** Disable DMA coalescing if interrupt moderation is
3318                 ** disabled.
3319                 */
3320                 adapter->dmac = 0;
3321                 newitr = 0;
3322         }
3323
3324         for (int i = 0; i < adapter->num_queues; i++, que++) {
3325                 /* First the RX queue entry */
3326                 ixgbe_set_ivar(adapter, i, que->msix, 0);
3327                 /* ... and the TX */
3328                 ixgbe_set_ivar(adapter, i, que->msix, 1);
3329                 /* Set an Initial EITR value */
3330                 IXGBE_WRITE_REG(&adapter->hw,
3331                     IXGBE_EITR(que->msix), newitr);
3332         }
3333
3334         /* For the Link interrupt */
3335         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3336 }
3337
3338 /*
3339 ** ixgbe_sfp_probe - called in the local timer to
3340 ** determine if a port had optics inserted.
3341 */  
3342 static bool ixgbe_sfp_probe(struct adapter *adapter)
3343 {
3344         struct ixgbe_hw *hw = &adapter->hw;
3345         device_t        dev = adapter->dev;
3346         bool            result = FALSE;
3347
3348         if ((hw->phy.type == ixgbe_phy_nl) &&
3349             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3350                 s32 ret = hw->phy.ops.identify_sfp(hw);
3351                 if (ret)
3352                         goto out;
3353                 ret = hw->phy.ops.reset(hw);
3354                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3355                         device_printf(dev,"Unsupported SFP+ module detected!");
3356                         printf(" Reload driver with supported module.\n");
3357                         adapter->sfp_probe = FALSE;
3358                         goto out;
3359                 } else
3360                         device_printf(dev,"SFP+ module detected!\n");
3361                 /* We now have supported optics */
3362                 adapter->sfp_probe = FALSE;
3363                 /* Set the optics type so system reports correctly */
3364                 ixgbe_setup_optics(adapter);
3365                 result = TRUE;
3366         }
3367 out:
3368         return (result);
3369 }
3370
3371 /*
3372 ** Tasklet handler for MSIX Link interrupts
3373 **  - do outside interrupt since it might sleep
3374 */
3375 static void
3376 ixgbe_handle_link(void *context, int pending)
3377 {
3378         struct adapter  *adapter = context;
3379
3380         ixgbe_check_link(&adapter->hw,
3381             &adapter->link_speed, &adapter->link_up, 0);
3382         ixgbe_update_link_status(adapter);
3383 }
3384
3385 /*
3386 ** Tasklet for handling SFP module interrupts
3387 */
3388 static void
3389 ixgbe_handle_mod(void *context, int pending)
3390 {
3391         struct adapter  *adapter = context;
3392         struct ixgbe_hw *hw = &adapter->hw;
3393         device_t        dev = adapter->dev;
3394         u32 err;
3395
3396         err = hw->phy.ops.identify_sfp(hw);
3397         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3398                 device_printf(dev,
3399                     "Unsupported SFP+ module type was detected.\n");
3400                 return;
3401         }
3402         err = hw->mac.ops.setup_sfp(hw);
3403         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3404                 device_printf(dev,
3405                     "Setup failure - unsupported SFP+ module type.\n");
3406                 return;
3407         }
3408         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3409         return;
3410 }
3411
3412
3413 /*
3414 ** Tasklet for handling MSF (multispeed fiber) interrupts
3415 */
3416 static void
3417 ixgbe_handle_msf(void *context, int pending)
3418 {
3419         struct adapter  *adapter = context;
3420         struct ixgbe_hw *hw = &adapter->hw;
3421         u32 autoneg;
3422         bool negotiate;
3423         int err;
3424
3425         err = hw->phy.ops.identify_sfp(hw);
3426         if (!err) {
3427                 ixgbe_setup_optics(adapter);
3428                 INIT_DEBUGOUT1("ixgbe_handle_msf: flags: %X\n", adapter->optics);
3429         }
3430
3431         autoneg = hw->phy.autoneg_advertised;
3432         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3433                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3434         if (hw->mac.ops.setup_link)
3435                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3436
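             /*
              * The newly identified module may support a different set of
              * speeds, so rebuild the advertised media list from scratch.
              */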
3437         ifmedia_removeall(&adapter->media);
3438         ixgbe_add_media_types(adapter);
3439         return;
3440 }
3441
3442 /*
3443 ** Tasklet for handling interrupts from an external PHY
3444 */
3445 static void
3446 ixgbe_handle_phy(void *context, int pending)
3447 {
3448         struct adapter  *adapter = context;
3449         struct ixgbe_hw *hw = &adapter->hw;
3450         int error;
3451
3452         error = hw->phy.ops.handle_lasi(hw);
3453         if (error == IXGBE_ERR_OVERTEMP)
3454                 device_printf(adapter->dev,
3455                     "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3456                     " PHY will downshift to lower power state!\n");
3457         else if (error)
3458                 device_printf(adapter->dev,
3459                     "Error handling LASI interrupt: %d\n",
3460                     error);
3461         return;
3462 }
3463
3464 #ifdef IXGBE_FDIR
3465 /*
3466 ** Tasklet for reinitializing the Flow Director filter table
3467 */
3468 static void
3469 ixgbe_reinit_fdir(void *context, int pending)
3470 {
3471         struct adapter  *adapter = context;
3472         struct ifnet   *ifp = adapter->ifp;
3473
3474         if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3475                 return;
3476         ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3477         adapter->fdir_reinit = 0;
3478         /* re-enable flow director interrupts */
3479         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3480         /* Restart the interface */
3481         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3482         return;
3483 }
3484 #endif
3485
3486 /*********************************************************************
3487  *
3488  *  Configure DMA Coalescing
3489  *
3490  **********************************************************************/
3491 static void
3492 ixgbe_config_dmac(struct adapter *adapter)
3493 {
3494         struct ixgbe_hw *hw = &adapter->hw;
3495         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3496
3497         if (hw->mac.type < ixgbe_mac_X550 ||
3498             !hw->mac.ops.dmac_config)
3499                 return;
3500
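             /*
              * Only push a new configuration to the hardware when the
              * requested watchdog value or the link speed has changed.
              */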
3501         if (dcfg->watchdog_timer ^ adapter->dmac ||
3502             dcfg->link_speed ^ adapter->link_speed) {
3503                 dcfg->watchdog_timer = adapter->dmac;
3504                 dcfg->fcoe_en = false;
3505                 dcfg->link_speed = adapter->link_speed;
3506                 dcfg->num_tcs = 1;
3507                 
3508                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3509                     dcfg->watchdog_timer, dcfg->link_speed);
3510
3511                 hw->mac.ops.dmac_config(hw);
3512         }
3513 }
3514
3515 /*
3516  * Checks whether the adapter supports Energy Efficient Ethernet
3517  * or not, based on device ID.
3518  */
3519 static void
3520 ixgbe_check_eee_support(struct adapter *adapter)
3521 {
3522         struct ixgbe_hw *hw = &adapter->hw;
3523
3524         adapter->eee_support = adapter->eee_enabled =
3525             (hw->device_id == IXGBE_DEV_ID_X550T ||
3526                 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3527 }
3528
3529 /*
3530  * Checks whether the adapter's ports are capable of
3531  * Wake On LAN by reading the adapter's NVM.
3532  *
3533  * Sets each port's hw->wol_enabled value depending
3534  * on the value read here.
3535  */
3536 static void
3537 ixgbe_check_wol_support(struct adapter *adapter)
3538 {
3539         struct ixgbe_hw *hw = &adapter->hw;
3540         u16 dev_caps = 0;
3541
3542         /* Find out WoL support for port */
3543         adapter->wol_support = hw->wol_enabled = 0;
3544         ixgbe_get_device_caps(hw, &dev_caps);
3545         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3546             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3547                 hw->bus.func == 0))
3548             adapter->wol_support = hw->wol_enabled = 1;
3549
3550         /* Save initial wake up filter configuration */
3551         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3552
3553         return;
3554 }
3555
3556 /*
3557  * Prepare the adapter/port for LPLU and/or WoL
3558  */
3559 static int
3560 ixgbe_setup_low_power_mode(struct adapter *adapter)
3561 {
3562         struct ixgbe_hw *hw = &adapter->hw;
3563         device_t dev = adapter->dev;
3564         s32 error = 0;
3565
3566         mtx_assert(&adapter->core_mtx, MA_OWNED);
3567
3568         /* Limit power management flow to X550EM baseT */
3569         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3570             && hw->phy.ops.enter_lplu) {
3571                 /* Turn off support for APM wakeup. (Using ACPI instead) */
3572                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3573                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3574
3575                 /*
3576                  * Clear Wake Up Status register to prevent any previous wakeup
3577                  * events from waking us up immediately after we suspend.
3578                  */
3579                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3580
3581                 /*
3582                  * Program the Wakeup Filter Control register with user filter
3583                  * settings
3584                  */
3585                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3586
3587                 /* Enable wakeups and power management in Wakeup Control */
3588                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3589                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3590
3591                 /* X550EM baseT adapters need a special LPLU flow */
3592                 hw->phy.reset_disable = true;
3593                 ixgbe_stop(adapter);
3594                 error = hw->phy.ops.enter_lplu(hw);
3595                 if (error)
3596                         device_printf(dev,
3597                             "Error entering LPLU: %d\n", error);
3598                 hw->phy.reset_disable = false;
3599         } else {
3600                 /* Just stop for other adapters */
3601                 ixgbe_stop(adapter);
3602         }
3603
3604         return error;
3605 }
3606
3607 /**********************************************************************
3608  *
3609  *  Update the board statistics counters.
3610  *
3611  **********************************************************************/
3612 static void
3613 ixgbe_update_stats_counters(struct adapter *adapter)
3614 {
3615         struct ixgbe_hw *hw = &adapter->hw;
3616         u32 missed_rx = 0, bprc, lxon, lxoff, total;
3617         u64 total_missed_rx = 0;
3618
3619         adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3620         adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3621         adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3622         adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3623
3624         for (int i = 0; i < 16; i++) {
3625                 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3626                 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3627                 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3628         }
3629         adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3630         adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3631         adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3632
3633         /* Hardware workaround: gprc counts missed packets */
3634         adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3635         adapter->stats.pf.gprc -= missed_rx;
3636
3637         if (hw->mac.type != ixgbe_mac_82598EB) {
3638                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3639                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3640                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3641                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3642                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3643                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3644                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3645                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3646         } else {
3647                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3648                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3649                 /* 82598 only has a counter in the high register */
3650                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3651                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3652                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3653         }
3654
3655         /*
3656          * Workaround: mprc hardware is incorrectly counting
3657          * broadcasts, so for now we subtract those.
3658          */
3659         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3660         adapter->stats.pf.bprc += bprc;
3661         adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3662         if (hw->mac.type == ixgbe_mac_82598EB)
3663                 adapter->stats.pf.mprc -= bprc;
3664
3665         adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3666         adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3667         adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3668         adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3669         adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3670         adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3671
3672         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3673         adapter->stats.pf.lxontxc += lxon;
3674         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3675         adapter->stats.pf.lxofftxc += lxoff;
3676         total = lxon + lxoff;
3677
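             /*
              * The TX counters include the XON/XOFF pause frames sent by
              * the MAC, so back them out here; each pause frame is a
              * minimum-size Ethernet frame, hence total * ETHER_MIN_LEN
              * for the octet count.
              */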
3678         adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3679         adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3680         adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3681         adapter->stats.pf.gptc -= total;
3682         adapter->stats.pf.mptc -= total;
3683         adapter->stats.pf.ptc64 -= total;
3684         adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3685
3686         adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3687         adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3688         adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3689         adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3690         adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3691         adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3692         adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3693         adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3694         adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3695         adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3696         adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3697         adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3698         adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3699         adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3700         adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3701         adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3702         adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3703         adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3704         /* Only read FCOE on 82599 */
3705         if (hw->mac.type != ixgbe_mac_82598EB) {
3706                 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3707                 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3708                 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3709                 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3710                 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3711         }
3712
3713         /* Fill out the OS statistics structure */
3714         IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3715         IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3716         IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3717         IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3718         IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3719         IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3720         IXGBE_SET_COLLISIONS(adapter, 0);
3721         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3722         IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3723             + adapter->stats.pf.rlec);
3724 }
3725
3726 #if __FreeBSD_version >= 1100036
3727 static uint64_t
3728 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3729 {
3730         struct adapter *adapter;
3731         struct tx_ring *txr;
3732         uint64_t rv;
3733
3734         adapter = if_getsoftc(ifp);
3735
3736         switch (cnt) {
3737         case IFCOUNTER_IPACKETS:
3738                 return (adapter->ipackets);
3739         case IFCOUNTER_OPACKETS:
3740                 return (adapter->opackets);
3741         case IFCOUNTER_IBYTES:
3742                 return (adapter->ibytes);
3743         case IFCOUNTER_OBYTES:
3744                 return (adapter->obytes);
3745         case IFCOUNTER_IMCASTS:
3746                 return (adapter->imcasts);
3747         case IFCOUNTER_OMCASTS:
3748                 return (adapter->omcasts);
3749         case IFCOUNTER_COLLISIONS:
3750                 return (0);
3751         case IFCOUNTER_IQDROPS:
3752                 return (adapter->iqdrops);
3753         case IFCOUNTER_OQDROPS:
3754                 rv = 0;
3755                 txr = adapter->tx_rings;
3756                 for (int i = 0; i < adapter->num_queues; i++, txr++)
3757                         rv += txr->br->br_drops;
3758                 return (rv);
3759         case IFCOUNTER_IERRORS:
3760                 return (adapter->ierrors);
3761         default:
3762                 return (if_get_counter_default(ifp, cnt));
3763         }
3764 }
3765 #endif
3766
3767 /** ixgbe_sysctl_tdh_handler - Handler function
3768  *  Retrieves the TDH value from the hardware
3769  */
3770 static int 
3771 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3772 {
3773         int error;
3774
3775         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3776         if (!txr) return 0;
3777
3778         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3779         error = sysctl_handle_int(oidp, &val, 0, req);
3780         if (error || !req->newptr)
3781                 return error;
3782         return 0;
3783 }
3784
3785 /** ixgbe_sysctl_tdt_handler - Handler function
3786  *  Retrieves the TDT value from the hardware
3787  */
3788 static int 
3789 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3790 {
3791         int error;
3792
3793         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3794         if (!txr) return 0;
3795
3796         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3797         error = sysctl_handle_int(oidp, &val, 0, req);
3798         if (error || !req->newptr)
3799                 return error;
3800         return 0;
3801 }
3802
3803 /** ixgbe_sysctl_rdh_handler - Handler function
3804  *  Retrieves the RDH value from the hardware
3805  */
3806 static int 
3807 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3808 {
3809         int error;
3810
3811         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3812         if (!rxr) return 0;
3813
3814         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3815         error = sysctl_handle_int(oidp, &val, 0, req);
3816         if (error || !req->newptr)
3817                 return error;
3818         return 0;
3819 }
3820
3821 /** ixgbe_sysctl_rdt_handler - Handler function
3822  *  Retrieves the RDT value from the hardware
3823  */
3824 static int 
3825 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3826 {
3827         int error;
3828
3829         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3830         if (!rxr) return 0;
3831
3832         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3833         error = sysctl_handle_int(oidp, &val, 0, req);
3834         if (error || !req->newptr)
3835                 return error;
3836         return 0;
3837 }
3838
3839 static int
3840 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3841 {
3842         int error;
3843         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3844         unsigned int reg, usec, rate;
3845
3846         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3847         usec = ((reg & 0x0FF8) >> 3);
3848         if (usec > 0)
3849                 rate = 500000 / usec;
3850         else
3851                 rate = 0;
3852         error = sysctl_handle_int(oidp, &rate, 0, req);
3853         if (error || !req->newptr)
3854                 return error;
3855         reg &= ~0xfff; /* default, no limitation */
3856         ixgbe_max_interrupt_rate = 0;
3857         if (rate > 0 && rate < 500000) {
3858                 if (rate < 1000)
3859                         rate = 1000;
3860                 ixgbe_max_interrupt_rate = rate;
3861                 reg |= ((4000000/rate) & 0xff8 );
3862         }
3863         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3864         return 0;
3865 }
3866
3867 static void
3868 ixgbe_add_device_sysctls(struct adapter *adapter)
3869 {
3870         device_t dev = adapter->dev;
3871         struct ixgbe_hw *hw = &adapter->hw;
3872         struct sysctl_oid_list *child;
3873         struct sysctl_ctx_list *ctx;
3874
3875         ctx = device_get_sysctl_ctx(dev);
3876         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3877
3878         /* Sysctls for all devices */
3879         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3880                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3881                         ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3882
3883         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3884                         CTLFLAG_RW,
3885                         &ixgbe_enable_aim, 1, "Interrupt Moderation");
3886
3887         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3888                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3889                         ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3890
3891         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3892                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3893                         ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3894
3895         /* for X550 devices */
3896         if (hw->mac.type >= ixgbe_mac_X550)
3897                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3898                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3899                                 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3900
3901         /* for X550T and X550EM backplane devices */
3902         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3903             hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3904                 struct sysctl_oid *eee_node;
3905                 struct sysctl_oid_list *eee_list;
3906
3907                 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3908                                            CTLFLAG_RD, NULL,
3909                                            "Energy Efficient Ethernet sysctls");
3910                 eee_list = SYSCTL_CHILDREN(eee_node);
3911
3912                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3913                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3914                                 ixgbe_sysctl_eee_enable, "I",
3915                                 "Enable or Disable EEE");
3916
3917                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3918                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3919                                 ixgbe_sysctl_eee_negotiated, "I",
3920                                 "EEE negotiated on link");
3921
3922                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3923                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3924                                 ixgbe_sysctl_eee_tx_lpi_status, "I",
3925                                 "Whether or not TX link is in LPI state");
3926
3927                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3928                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3929                                 ixgbe_sysctl_eee_rx_lpi_status, "I",
3930                                 "Whether or not RX link is in LPI state");
3931         }
3932
3933         /* for certain 10GBaseT devices */
3934         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3935             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3936                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3937                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3938                                 ixgbe_sysctl_wol_enable, "I",
3939                                 "Enable/Disable Wake on LAN");
3940
3941                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3942                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3943                                 ixgbe_sysctl_wufc, "I",
3944                                 "Enable/Disable Wake Up Filters");
3945         }
3946
3947         /* for X550EM 10GBaseT devices */
3948         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3949                 struct sysctl_oid *phy_node;
3950                 struct sysctl_oid_list *phy_list;
3951
3952                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3953                                            CTLFLAG_RD, NULL,
3954                                            "External PHY sysctls");
3955                 phy_list = SYSCTL_CHILDREN(phy_node);
3956
3957                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3958                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3959                                 ixgbe_sysctl_phy_temp, "I",
3960                                 "Current External PHY Temperature (Celsius)");
3961
3962                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3963                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3964                                 ixgbe_sysctl_phy_overtemp_occurred, "I",
3965                                 "External PHY High Temperature Event Occurred");
3966         }
3967 }
3968
3969 /*
3970  * Add sysctl variables, one per statistic, to the system.
3971  */
3972 static void
3973 ixgbe_add_hw_stats(struct adapter *adapter)
3974 {
3975         device_t dev = adapter->dev;
3976
3977         struct tx_ring *txr = adapter->tx_rings;
3978         struct rx_ring *rxr = adapter->rx_rings;
3979
3980         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3981         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3982         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3983         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3984
3985         struct sysctl_oid *stat_node, *queue_node;
3986         struct sysctl_oid_list *stat_list, *queue_list;
3987
3988 #define QUEUE_NAME_LEN 32
3989         char namebuf[QUEUE_NAME_LEN];
3990
3991         /* Driver Statistics */
3992         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3993                         CTLFLAG_RD, &adapter->dropped_pkts,
3994                         "Driver dropped packets");
3995         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3996                         CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3997                         "m_defrag() failed");
3998         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3999                         CTLFLAG_RD, &adapter->watchdog_events,
4000                         "Watchdog timeouts");
4001         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
4002                         CTLFLAG_RD, &adapter->link_irq,
4003                         "Link MSIX IRQ Handled");
4004
4005         for (int i = 0; i < adapter->num_queues; i++, txr++) {
4006                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4007                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4008                                             CTLFLAG_RD, NULL, "Queue Name");
4009                 queue_list = SYSCTL_CHILDREN(queue_node);
4010
4011                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4012                                 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4013                                 sizeof(&adapter->queues[i]),
4014                                 ixgbe_sysctl_interrupt_rate_handler, "IU",
4015                                 "Interrupt Rate");
4016                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4017                                 CTLFLAG_RD, &(adapter->queues[i].irqs),
4018                                 "irqs on this queue");
4019                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
4020                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4021                                 ixgbe_sysctl_tdh_handler, "IU",
4022                                 "Transmit Descriptor Head");
4023                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
4024                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4025                                 ixgbe_sysctl_tdt_handler, "IU",
4026                                 "Transmit Descriptor Tail");
4027                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4028                                 CTLFLAG_RD, &txr->tso_tx,
4029                                 "TSO");
4030                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4031                                 CTLFLAG_RD, &txr->no_tx_dma_setup,
4032                                 "Driver tx dma failure in xmit");
4033                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4034                                 CTLFLAG_RD, &txr->no_desc_avail,
4035                                 "Queue No Descriptor Available");
4036                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4037                                 CTLFLAG_RD, &txr->total_packets,
4038                                 "Queue Packets Transmitted");
4039                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4040                                 CTLFLAG_RD, &txr->br->br_drops,
4041                                 "Packets dropped in buf_ring");
4042         }
4043
4044         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4045                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4046                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
4047                                             CTLFLAG_RD, NULL, "Queue Name");
4048                 queue_list = SYSCTL_CHILDREN(queue_node);
4049
4050                 struct lro_ctrl *lro = &rxr->lro;
4056
4057                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
4058                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4059                                 ixgbe_sysctl_rdh_handler, "IU",
4060                                 "Receive Descriptor Head");
4061                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
4062                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4063                                 ixgbe_sysctl_rdt_handler, "IU",
4064                                 "Receive Descriptor Tail");
4065                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4066                                 CTLFLAG_RD, &rxr->rx_packets,
4067                                 "Queue Packets Received");
4068                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4069                                 CTLFLAG_RD, &rxr->rx_bytes,
4070                                 "Queue Bytes Received");
4071                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4072                                 CTLFLAG_RD, &rxr->rx_copies,
4073                                 "Copied RX Frames");
4074                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4075                                 CTLFLAG_RD, &lro->lro_queued, 0,
4076                                 "LRO Queued");
4077                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4078                                 CTLFLAG_RD, &lro->lro_flushed, 0,
4079                                 "LRO Flushed");
4080         }
4081
4082         /* MAC stats get their own sub node */
4083
4084         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4085                                     CTLFLAG_RD, NULL, "MAC Statistics");
4086         stat_list = SYSCTL_CHILDREN(stat_node);
4087
4088         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4089                         CTLFLAG_RD, &stats->crcerrs,
4090                         "CRC Errors");
4091         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4092                         CTLFLAG_RD, &stats->illerrc,
4093                         "Illegal Byte Errors");
4094         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4095                         CTLFLAG_RD, &stats->errbc,
4096                         "Byte Errors");
4097         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4098                         CTLFLAG_RD, &stats->mspdc,
4099                         "MAC Short Packets Discarded");
4100         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4101                         CTLFLAG_RD, &stats->mlfc,
4102                         "MAC Local Faults");
4103         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4104                         CTLFLAG_RD, &stats->mrfc,
4105                         "MAC Remote Faults");
4106         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4107                         CTLFLAG_RD, &stats->rlec,
4108                         "Receive Length Errors");
4109
4110         /* Flow Control stats */
4111         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4112                         CTLFLAG_RD, &stats->lxontxc,
4113                         "Link XON Transmitted");
4114         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4115                         CTLFLAG_RD, &stats->lxonrxc,
4116                         "Link XON Received");
4117         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4118                         CTLFLAG_RD, &stats->lxofftxc,
4119                         "Link XOFF Transmitted");
4120         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4121                         CTLFLAG_RD, &stats->lxoffrxc,
4122                         "Link XOFF Received");
4123
4124         /* Packet Reception Stats */
4125         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4126                         CTLFLAG_RD, &stats->tor, 
4127                         "Total Octets Received"); 
4128         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4129                         CTLFLAG_RD, &stats->gorc, 
4130                         "Good Octets Received"); 
4131         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4132                         CTLFLAG_RD, &stats->tpr,
4133                         "Total Packets Received");
4134         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4135                         CTLFLAG_RD, &stats->gprc,
4136                         "Good Packets Received");
4137         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4138                         CTLFLAG_RD, &stats->mprc,
4139                         "Multicast Packets Received");
4140         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4141                         CTLFLAG_RD, &stats->bprc,
4142                         "Broadcast Packets Received");
4143         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4144                         CTLFLAG_RD, &stats->prc64,
4145                         "64 byte frames received");
4146         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4147                         CTLFLAG_RD, &stats->prc127,
4148                         "65-127 byte frames received");
4149         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4150                         CTLFLAG_RD, &stats->prc255,
4151                         "128-255 byte frames received");
4152         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4153                         CTLFLAG_RD, &stats->prc511,
4154                         "256-511 byte frames received");
4155         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4156                         CTLFLAG_RD, &stats->prc1023,
4157                         "512-1023 byte frames received");
4158         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4159                         CTLFLAG_RD, &stats->prc1522,
4160                         "1024-1522 byte frames received");
4161         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4162                         CTLFLAG_RD, &stats->ruc,
4163                         "Receive Undersized");
4164         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4165                         CTLFLAG_RD, &stats->rfc,
4166                         "Fragmented Packets Received");
4167         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4168                         CTLFLAG_RD, &stats->roc,
4169                         "Oversized Packets Received");
4170         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4171                         CTLFLAG_RD, &stats->rjc,
4172                         "Received Jabber");
4173         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4174                         CTLFLAG_RD, &stats->mngprc,
4175                         "Management Packets Received");
4176         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4177                         CTLFLAG_RD, &stats->mngpdc,
4178                         "Management Packets Dropped");
4179         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4180                         CTLFLAG_RD, &stats->xec,
4181                         "Checksum Errors");
4182
4183         /* Packet Transmission Stats */
4184         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4185                         CTLFLAG_RD, &stats->gotc, 
4186                         "Good Octets Transmitted"); 
4187         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4188                         CTLFLAG_RD, &stats->tpt,
4189                         "Total Packets Transmitted");
4190         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4191                         CTLFLAG_RD, &stats->gptc,
4192                         "Good Packets Transmitted");
4193         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4194                         CTLFLAG_RD, &stats->bptc,
4195                         "Broadcast Packets Transmitted");
4196         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4197                         CTLFLAG_RD, &stats->mptc,
4198                         "Multicast Packets Transmitted");
4199         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4200                         CTLFLAG_RD, &stats->mngptc,
4201                         "Management Packets Transmitted");
4202         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4203                         CTLFLAG_RD, &stats->ptc64,
4204                         "64 byte frames transmitted");
4205         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4206                         CTLFLAG_RD, &stats->ptc127,
4207                         "65-127 byte frames transmitted");
4208         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4209                         CTLFLAG_RD, &stats->ptc255,
4210                         "128-255 byte frames transmitted");
4211         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4212                         CTLFLAG_RD, &stats->ptc511,
4213                         "256-511 byte frames transmitted");
4214         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4215                         CTLFLAG_RD, &stats->ptc1023,
4216                         "512-1023 byte frames transmitted");
4217         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4218                         CTLFLAG_RD, &stats->ptc1522,
4219                         "1024-1522 byte frames transmitted");
4220 }
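/*
 * A quick way to read the statistics registered above is a sketch like
 * the following (only the "mac_stats" node name is taken from the code
 * above; the dev.ix.<unit> attachment point and unit number are
 * assumptions):
 *
 *      # sysctl dev.ix.0.mac_stats
 */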
4221
4222 /*
4223 ** Set flow control using sysctl:
4224 ** Flow control values:
4225 **      0 - off
4226 **      1 - rx pause
4227 **      2 - tx pause
4228 **      3 - full
4229 */
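/*
 * Usage sketch, assuming this handler is attached as dev.ix.<unit>.fc
 * (the exact OID comes from the driver's sysctl registration):
 *
 *      # sysctl dev.ix.0.fc=3          request full rx/tx pause
 *      # sysctl dev.ix.0.fc=0          turn flow control off
 *
 * Values other than 0-3 are rejected with EINVAL and the previous
 * setting is restored.  With more than one queue, turning flow control
 * off also re-enables per-queue RX drop (see ixgbe_enable_rx_drop()
 * below).
 */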
4230 static int
4231 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4232 {
4233         int error, last;
4234         struct adapter *adapter = (struct adapter *) arg1;
4235
4236         last = adapter->fc;
4237         error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4238         if ((error) || (req->newptr == NULL))
4239                 return (error);
4240
4241         /* Don't bother if it hasn't changed */
4242         if (adapter->fc == last)
4243                 return (0);
4244
4245         switch (adapter->fc) {
4246                 case ixgbe_fc_rx_pause:
4247                 case ixgbe_fc_tx_pause:
4248                 case ixgbe_fc_full:
4249                         adapter->hw.fc.requested_mode = adapter->fc;
4250                         if (adapter->num_queues > 1)
4251                                 ixgbe_disable_rx_drop(adapter);
4252                         break;
4253                 case ixgbe_fc_none:
4254                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
4255                         if (adapter->num_queues > 1)
4256                                 ixgbe_enable_rx_drop(adapter);
4257                         break;
4258                 default:
4259                         adapter->fc = last;
4260                         return (EINVAL);
4261         }
4262         /* Don't autoneg if forcing a value */
4263         adapter->hw.fc.disable_fc_autoneg = TRUE;
4264         ixgbe_fc_enable(&adapter->hw);
4265         return (error);
4266 }
4267
4268 /*
4269 ** Control advertised link speed:
4270 **      Flags:
4271 **      0x1 - advertise 100 Mb
4272 **      0x2 - advertise 1G
4273 **      0x4 - advertise 10G
4274 */
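/*
 * Usage sketch; the OID name below is an assumption (whatever this
 * handler is registered under, e.g. dev.ix.<unit>.advertise_speed).
 * The flags are ORed together:
 *
 *      # sysctl dev.ix.0.advertise_speed=0x6   advertise 1G + 10G
 *      # sysctl dev.ix.0.advertise_speed=0x7   also advertise 100Mb
 *                                              (X540/X550 only)
 *
 * Values outside 0x1-0x7, or any setting on media other than copper or
 * multispeed fiber, return EINVAL.
 */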
4275 static int
4276 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4277 {
4278         int                     error = 0, requested;
4279         struct adapter          *adapter;
4280         device_t                dev;
4281         struct ixgbe_hw         *hw;
4282         ixgbe_link_speed        speed = 0;
4283
4284         adapter = (struct adapter *) arg1;
4285         dev = adapter->dev;
4286         hw = &adapter->hw;
4287
4288         requested = adapter->advertise;
4289         error = sysctl_handle_int(oidp, &requested, 0, req);
4290         if ((error) || (req->newptr == NULL))
4291                 return (error);
4292
4293         /* Checks to validate new value */
4294         if (adapter->advertise == requested) /* no change */
4295                 return (0);
4296
4297         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4298             (hw->phy.multispeed_fiber))) {
4299                 device_printf(dev,
4300                     "Advertised speed can only be set on copper or "
4301                     "multispeed fiber media types.\n");
4302                 return (EINVAL);
4303         }
4304
4305         if (requested < 0x1 || requested > 0x7) {
4306                 device_printf(dev,
4307                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4308                 return (EINVAL);
4309         }
4310
4311         if ((requested & 0x1)
4312             && (hw->mac.type != ixgbe_mac_X540)
4313             && (hw->mac.type != ixgbe_mac_X550)) {
4314                 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4315                 return (EINVAL);
4316         }
4317
4318         /* Set new value and report new advertised mode */
4319         if (requested & 0x1)
4320                 speed |= IXGBE_LINK_SPEED_100_FULL;
4321         if (requested & 0x2)
4322                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4323         if (requested & 0x4)
4324                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4325
4326         hw->mac.autotry_restart = TRUE;
4327         hw->mac.ops.setup_link(hw, speed, TRUE);
4328         adapter->advertise = requested;
4329
4330         return (error);
4331 }
4332
4333 /*
4334  * The following two sysctls are for X550 BaseT devices;
4335  * they deal with the external PHY used in them.
4336  */
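/*
 * Usage sketch for both handlers; the OID names are assumptions
 * (whatever the driver registers them under, e.g.
 * dev.ix.<unit>.phy_temp and dev.ix.<unit>.phy_overtemp_occurred):
 *
 *      # sysctl dev.ix.0.phy_temp                  current PHY temp
 *      # sysctl dev.ix.0.phy_overtemp_occurred     1 if overtemp seen
 *
 * Both return ENODEV on anything other than the X550EM_X_10G_T.
 */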
4337 static int
4338 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4339 {
4340         struct adapter  *adapter = (struct adapter *) arg1;
4341         struct ixgbe_hw *hw = &adapter->hw;
4342         u16 reg;
4343
4344         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4345                 device_printf(adapter->dev,
4346                     "Device has no supported external thermal sensor.\n");
4347                 return (ENODEV);
4348         }
4349
4350         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4351                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4352                                       &reg)) {
4353                 device_printf(adapter->dev,
4354                     "Error reading from PHY's current temperature register\n");
4355                 return (EAGAIN);
4356         }
4357
4358         /* Shift temp for output */
4359         reg = reg >> 8;
4360
4361         return (sysctl_handle_int(oidp, NULL, reg, req));
4362 }
4363
4364 /*
4365  * Reports whether the current PHY temperature is over
4366  * the overtemp threshold.
4367  *  - This is reported directly from the PHY
4368  */
4369 static int
4370 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4371 {
4372         struct adapter  *adapter = (struct adapter *) arg1;
4373         struct ixgbe_hw *hw = &adapter->hw;
4374         u16 reg;
4375
4376         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4377                 device_printf(adapter->dev,
4378                     "Device has no supported external thermal sensor.\n");
4379                 return (ENODEV);
4380         }
4381
4382         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4383                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4384                                       &reg)) {
4385                 device_printf(adapter->dev,
4386                     "Error reading from PHY's temperature status register\n");
4387                 return (EAGAIN);
4388         }
4389
4390         /* Get occurrence bit */
4391         reg = !!(reg & 0x4000);
4392         return (sysctl_handle_int(oidp, NULL, reg, req));
4393 }
4394
4395 /*
4396 ** Thermal Shutdown Trigger (internal MAC)
4397 **   - Set this to 1 to cause an overtemp event to occur
4398 */
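/*
 * Usage sketch (the OID name is an assumption, e.g.
 * dev.ix.<unit>.thermal_test):
 *
 *      # sysctl dev.ix.0.thermal_test=1
 *
 * Writing a non-zero value sets IXGBE_EICR_TS in EICS, which makes the
 * MAC raise a simulated overtemperature interrupt.
 */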
4399 static int
4400 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4401 {
4402         struct adapter  *adapter = (struct adapter *) arg1;
4403         struct ixgbe_hw *hw = &adapter->hw;
4404         int error, fire = 0;
4405
4406         error = sysctl_handle_int(oidp, &fire, 0, req);
4407         if ((error) || (req->newptr == NULL))
4408                 return (error);
4409
4410         if (fire) {
4411                 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4412                 reg |= IXGBE_EICR_TS;
4413                 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4414         }
4415
4416         return (0);
4417 }
4418
4419 /*
4420 ** Manage DMA Coalescing.
4421 ** Control values:
4422 **      0/1 - off / on (use default value of 1000)
4423 **
4424 **      Legal timer values are:
4425 **      50,100,250,500,1000,2000,5000,10000
4426 **
4427 **      Turning off interrupt moderation will also turn this off.
4428 */
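/*
 * Usage sketch (the OID name is an assumption, e.g. dev.ix.<unit>.dmac):
 *
 *      # sysctl dev.ix.0.dmac=1        enable with the default (1000)
 *      # sysctl dev.ix.0.dmac=250      enable with a timer value of 250
 *      # sysctl dev.ix.0.dmac=0        disable
 *
 * Values outside the legal set above return EINVAL and keep the old
 * setting; a successful change re-initializes a running interface.
 */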
4429 static int
4430 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4431 {
4432         struct adapter *adapter = (struct adapter *) arg1;
4433         struct ixgbe_hw *hw = &adapter->hw;
4434         struct ifnet *ifp = adapter->ifp;
4435         int             error;
4436         u16             oldval;
4437
4438         oldval = adapter->dmac;
4439         error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4440         if ((error) || (req->newptr == NULL))
4441                 return (error);
4442
4443         switch (hw->mac.type) {
4444         case ixgbe_mac_X550:
4445         case ixgbe_mac_X550EM_x:
4446                 break;
4447         default:
4448                 device_printf(adapter->dev,
4449                     "DMA Coalescing is only supported on X550 devices\n");
4450                 return (ENODEV);
4451         }
4452
4453         switch (adapter->dmac) {
4454         case 0:
4455                 /* Disabled */
4456                 break;
4457         case 1: /* Enable and use default */
4458                 adapter->dmac = 1000;
4459                 break;
4460         case 50:
4461         case 100:
4462         case 250:
4463         case 500:
4464         case 1000:
4465         case 2000:
4466         case 5000:
4467         case 10000:
4468                 /* Legal values - allow */
4469                 break;
4470         default:
4471                 /* Illegal value, restore the previous setting */
4472                 adapter->dmac = oldval;
4473                 return (EINVAL);
4474         }
4475
4476         /* Re-initialize hardware if it's already running */
4477         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4478                 ixgbe_init(adapter);
4479
4480         return (0);
4481 }
4482
4483 /*
4484  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4485  * Values:
4486  *      0 - disabled
4487  *      1 - enabled
4488  */
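/*
 * Usage sketch (hypothetical OID, e.g. dev.ix.<unit>.wol_enable):
 *
 *      # sysctl dev.ix.0.wol_enable=1
 *
 * Returns ENODEV if the adapter has no WoL support; which packet types
 * actually trigger a wake-up is controlled separately by the WUFC
 * sysctl below.
 */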
4489 static int
4490 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4491 {
4492         struct adapter *adapter = (struct adapter *) arg1;
4493         struct ixgbe_hw *hw = &adapter->hw;
4494         int new_wol_enabled;
4495         int error = 0;
4496
4497         new_wol_enabled = hw->wol_enabled;
4498         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4499         if ((error) || (req->newptr == NULL))
4500                 return (error);
4501         if (new_wol_enabled == hw->wol_enabled)
4502                 return (0);
4503
4504         if (new_wol_enabled > 0 && !adapter->wol_support)
4505                 return (ENODEV);
4506         else
4507                 hw->wol_enabled = !!(new_wol_enabled);
4508
4509         return (0);
4510 }
4511
4512 /*
4513  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4514  * if supported by the adapter.
4515  * Values:
4516  *      0 - disabled
4517  *      1 - enabled
4518  */
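/*
 * Usage sketch (hypothetical OID, e.g. dev.ix.<unit>.eee_enabled):
 *
 *      # sysctl dev.ix.0.eee_enabled=1
 *
 * Returns ENODEV when the adapter lacks EEE support; changing the value
 * re-initializes a running interface so the link is renegotiated.
 */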
4519 static int
4520 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4521 {
4522         struct adapter *adapter = (struct adapter *) arg1;
4523         struct ifnet *ifp = adapter->ifp;
4524         int new_eee_enabled, error = 0;
4525
4526         new_eee_enabled = adapter->eee_enabled;
4527         error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4528         if ((error) || (req->newptr == NULL))
4529                 return (error);
4530         if (new_eee_enabled == adapter->eee_enabled)
4531                 return (0);
4532
4533         if (new_eee_enabled > 0 && !adapter->eee_support)
4534                 return (ENODEV);
4535         else
4536                 adapter->eee_enabled = !!(new_eee_enabled);
4537
4538         /* Re-initialize hardware if it's already running */
4539         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4540                 ixgbe_init(adapter);
4541
4542         return (0);
4543 }
4544
4545 /*
4546  * Read-only sysctl indicating whether EEE support was negotiated
4547  * on the link.
4548  */
4549 static int
4550 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4551 {
4552         struct adapter *adapter = (struct adapter *) arg1;
4553         struct ixgbe_hw *hw = &adapter->hw;
4554         bool status;
4555
4556         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4557
4558         return (sysctl_handle_int(oidp, NULL, status, req));
4559 }
4560
4561 /*
4562  * Read-only sysctl indicating whether RX Link is in LPI state.
4563  */
4564 static int
4565 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4566 {
4567         struct adapter *adapter = (struct adapter *) arg1;
4568         struct ixgbe_hw *hw = &adapter->hw;
4569         bool status;
4570
4571         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4572             IXGBE_EEE_RX_LPI_STATUS);
4573
4574         return (sysctl_handle_int(oidp, NULL, status, req));
4575 }
4576
4577 /*
4578  * Read-only sysctl indicating whether TX Link is in LPI state.
4579  */
4580 static int
4581 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4582 {
4583         struct adapter *adapter = (struct adapter *) arg1;
4584         struct ixgbe_hw *hw = &adapter->hw;
4585         bool status;
4586
4587         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4588             IXGBE_EEE_TX_LPI_STATUS);
4589
4590         return (sysctl_handle_int(oidp, NULL, status, req));
4591 }
4592
4593 /*
4594  * Sysctl to select which received packet types will wake the
4595  * adapter up.
4596  * WUFC - Wake Up Filter Control
4597  * Flags:
4598  *      0x1  - Link Status Change
4599  *      0x2  - Magic Packet
4600  *      0x4  - Direct Exact
4601  *      0x8  - Directed Multicast
4602  *      0x10 - Broadcast
4603  *      0x20 - ARP/IPv4 Request Packet
4604  *      0x40 - Direct IPv4 Packet
4605  *      0x80 - Direct IPv6 Packet
4606  *
4607  * Setting any other flag will cause the sysctl to return an
4608  * error.
4609  */
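/*
 * Usage sketch (hypothetical OID, e.g. dev.ix.<unit>.wufc); the flags
 * are ORed together, so magic packet plus broadcast is:
 *
 *      # sysctl dev.ix.0.wufc=0x12     (0x2 | 0x10)
 *
 * Any bit above 0xff is rejected with EINVAL.
 */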
4610 static int
4611 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4612 {
4613         struct adapter *adapter = (struct adapter *) arg1;
4614         int error = 0;
4615         u32 new_wufc;
4616
4617         new_wufc = adapter->wufc;
4618
4619         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4620         if ((error) || (req->newptr == NULL))
4621                 return (error);
4622         if (new_wufc == adapter->wufc)
4623                 return (0);
4624
4625         if (new_wufc & 0xffffff00)
4626                 return (EINVAL);
4627         else {
4628                 new_wufc &= 0xff;
4629                 new_wufc |= (0xffffff00 & adapter->wufc);
4630                 adapter->wufc = new_wufc;
4631         }
4632
4633         return (0);
4634 }
4635
4636 /*
4637 ** Enable the hardware to drop packets when the buffer is
4638 ** full. This is useful with multiple queues, so that no single
4639 ** full queue stalls the entire RX engine. We only enable this
4640 ** when multiqueue is in use AND flow control is
4641 ** disabled.
4642 */
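/*
 * Note that ixgbe_set_flowcntl() above calls these helpers
 * automatically, so with multiple queues the per-queue drop bit simply
 * follows the flow control setting: enabled when flow control is off,
 * disabled when any pause mode is requested.
 */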
4643 static void
4644 ixgbe_enable_rx_drop(struct adapter *adapter)
4645 {
4646         struct ixgbe_hw *hw = &adapter->hw;
4647
4648         for (int i = 0; i < adapter->num_queues; i++) {
4649                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4650                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4651                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4652         }
4653 }
4654
4655 static void
4656 ixgbe_disable_rx_drop(struct adapter *adapter)
4657 {
4658         struct ixgbe_hw *hw = &adapter->hw;
4659
4660         for (int i = 0; i < adapter->num_queues; i++) {
4661                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4662                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4663                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4664         }
4665 }
4666
4667 static void
4668 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4669 {
4670         u32 mask;
4671
4672         switch (adapter->hw.mac.type) {
4673         case ixgbe_mac_82598EB:
4674                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4675                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4676                 break;
4677         case ixgbe_mac_82599EB:
4678         case ixgbe_mac_X540:
4679         case ixgbe_mac_X550:
4680         case ixgbe_mac_X550EM_x:
4681                 mask = (queues & 0xFFFFFFFF);
4682                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4683                 mask = (queues >> 32);
4684                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4685                 break;
4686         default:
4687                 break;
4688         }
4689 }
4690
4691