FreeBSD stable/10: sys/dev/ixgbe/if_ix.c (commit: "Limit the number of autoconfigured queues to 8.")
1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /*********************************************************************
44  *  Set this to one to display debug statistics
45  *********************************************************************/
46 int             ixgbe_display_debug_stats = 0;
47
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixgbe_driver_version[] = "2.8.3";
52
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixgbe_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
64 {
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95         /* required last entry */
96         {0, 0, 0, 0, 0}
97 };
98
99 /*********************************************************************
100  *  Table of branding strings
101  *********************************************************************/
102
103 static char    *ixgbe_strings[] = {
104         "Intel(R) PRO/10GbE PCI-Express Network Driver"
105 };
106
107 /*********************************************************************
108  *  Function prototypes
109  *********************************************************************/
110 static int      ixgbe_probe(device_t);
111 static int      ixgbe_attach(device_t);
112 static int      ixgbe_detach(device_t);
113 static int      ixgbe_shutdown(device_t);
114 static int      ixgbe_suspend(device_t);
115 static int      ixgbe_resume(device_t);
116 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void     ixgbe_init(void *);
118 static void     ixgbe_init_locked(struct adapter *);
119 static void     ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
122 #endif
123 static void     ixgbe_add_media_types(struct adapter *);
124 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int      ixgbe_media_change(struct ifnet *);
126 static void     ixgbe_identify_hardware(struct adapter *);
127 static int      ixgbe_allocate_pci_resources(struct adapter *);
128 static void     ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int      ixgbe_allocate_msix(struct adapter *);
130 static int      ixgbe_allocate_legacy(struct adapter *);
131 static int      ixgbe_setup_msix(struct adapter *);
132 static void     ixgbe_free_pci_resources(struct adapter *);
133 static void     ixgbe_local_timer(void *);
134 static int      ixgbe_setup_interface(device_t, struct adapter *);
135 static void     ixgbe_config_dmac(struct adapter *);
136 static void     ixgbe_config_delay_values(struct adapter *);
137 static void     ixgbe_config_link(struct adapter *);
138 static void     ixgbe_check_eee_support(struct adapter *);
139 static void     ixgbe_check_wol_support(struct adapter *);
140 static int      ixgbe_setup_low_power_mode(struct adapter *);
141 static void     ixgbe_rearm_queues(struct adapter *, u64);
142
143 static void     ixgbe_initialize_transmit_units(struct adapter *);
144 static void     ixgbe_initialize_receive_units(struct adapter *);
145 static void     ixgbe_enable_rx_drop(struct adapter *);
146 static void     ixgbe_disable_rx_drop(struct adapter *);
147
148 static void     ixgbe_enable_intr(struct adapter *);
149 static void     ixgbe_disable_intr(struct adapter *);
150 static void     ixgbe_update_stats_counters(struct adapter *);
151 static void     ixgbe_set_promisc(struct adapter *);
152 static void     ixgbe_set_multi(struct adapter *);
153 static void     ixgbe_update_link_status(struct adapter *);
154 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void     ixgbe_configure_ivars(struct adapter *);
156 static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
157
158 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
161
162 static void     ixgbe_add_device_sysctls(struct adapter *);
163 static void     ixgbe_add_hw_stats(struct adapter *);
164
165 /* Sysctl handlers */
166 static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int      ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int      ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int      ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int      ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int      ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
178
179 /* Support for pluggable optic modules */
180 static bool     ixgbe_sfp_probe(struct adapter *);
181 static void     ixgbe_setup_optics(struct adapter *);
182
183 /* Legacy (single vector) interrupt handler */
184 static void     ixgbe_legacy_irq(void *);
185
186 /* The MSI/X Interrupt handlers */
187 static void     ixgbe_msix_que(void *);
188 static void     ixgbe_msix_link(void *);
189
190 /* Deferred interrupt tasklets */
191 static void     ixgbe_handle_que(void *, int);
192 static void     ixgbe_handle_link(void *, int);
193 static void     ixgbe_handle_msf(void *, int);
194 static void     ixgbe_handle_mod(void *, int);
195 static void     ixgbe_handle_phy(void *, int);
196
197 #ifdef IXGBE_FDIR
198 static void     ixgbe_reinit_fdir(void *, int);
199 #endif
200
201 /*********************************************************************
202  *  FreeBSD Device Interface Entry Points
203  *********************************************************************/
204
205 static device_method_t ix_methods[] = {
206         /* Device interface */
207         DEVMETHOD(device_probe, ixgbe_probe),
208         DEVMETHOD(device_attach, ixgbe_attach),
209         DEVMETHOD(device_detach, ixgbe_detach),
210         DEVMETHOD(device_shutdown, ixgbe_shutdown),
211         DEVMETHOD(device_suspend, ixgbe_suspend),
212         DEVMETHOD(device_resume, ixgbe_resume),
213         DEVMETHOD_END
214 };
215
216 static driver_t ix_driver = {
217         "ix", ix_methods, sizeof(struct adapter),
218 };
219
220 devclass_t ix_devclass;
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
222
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
225
226 /*
227 ** TUNEABLE PARAMETERS:
228 */
229
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231                    "IXGBE driver parameters");
232
233 /*
234 ** AIM: Adaptive Interrupt Moderation
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic for that interrupt vector
238 */
239 static int ixgbe_enable_aim = TRUE;
240 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
241     "Enable adaptive interrupt moderation");
242
243 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
244 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
245     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
246
247 /* How many packets rxeof tries to clean at a time */
248 static int ixgbe_rx_process_limit = 256;
249 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
250 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
251     &ixgbe_rx_process_limit, 0,
252     "Maximum number of received packets to process at a time, "
253     "-1 means unlimited");
254
255 /* How many packets txeof tries to clean at a time */
256 static int ixgbe_tx_process_limit = 256;
257 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
258 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
259     &ixgbe_tx_process_limit, 0,
260     "Maximum number of sent packets to process at a time, "
261     "-1 means unlimited");
262
263 /*
264 ** Smart speed setting, default to on.
265 ** This only works as a compile-time option
266 ** right now, since it is applied during attach;
267 ** set this to 'ixgbe_smart_speed_off' to
268 ** disable.
269 */
270 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
271
272 /*
273  * MSIX should be the default for best performance,
274  * but this allows it to be forced off for testing.
275  */
276 static int ixgbe_enable_msix = 1;
277 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
278     "Enable MSI-X interrupts");
279
280 /*
281  * Number of queues; if set to 0 the driver
282  * autoconfigures based on the number of
283  * CPUs, up to a maximum of 8. This can be
284  * overridden manually here.
285  */
286 static int ixgbe_num_queues = 0;
287 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
288     "Number of queues to configure up to a maximum of 8; "
289     "0 indicates autoconfigure");
290
291 /*
292 ** Number of TX descriptors per ring,
293 ** setting higher than RX as this seems
294 ** the better performing choice.
295 */
296 static int ixgbe_txd = PERFORM_TXD;
297 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
298     "Number of transmit descriptors per queue");
299
300 /* Number of RX descriptors per ring */
301 static int ixgbe_rxd = PERFORM_RXD;
302 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
303     "Number of receive descriptors per queue");
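/*
** Illustrative note (not part of the original source): the RDTUN/RWTUN sysctls
** above double as loader tunables, so they can be seeded from /boot/loader.conf
** before the driver attaches, e.g.
**
**   hw.ix.num_queues="4"   # pin the queue count instead of autoconfiguring
**   hw.ix.enable_aim="0"   # turn off adaptive interrupt moderation
**   hw.ix.txd="2048"       # TX descriptors per ring (checked against MIN/MAX_TXD)
**   hw.ix.rxd="2048"       # RX descriptors per ring (checked against MIN/MAX_RXD)
*/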
304
305 /*
306 ** Setting this allows the use of
307 ** unsupported SFP+ modules; note that
308 ** in doing so you are on your own :)
309 */
310 static int allow_unsupported_sfp = FALSE;
311 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
312
313 /* Keep a running tab on detected ports for sanity checking */
314 static int ixgbe_total_ports;
315
316 #ifdef IXGBE_FDIR
317 /* 
318 ** Flow Director actually 'steals'
319 ** part of the packet buffer as its
320 ** filter pool, this variable controls
321 ** how much it uses:
322 **  0 = 64K, 1 = 128K, 2 = 256K
323 */
324 static int fdir_pballoc = 1;
325 #endif
326
327 #ifdef DEV_NETMAP
328 /*
329  * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
330  * be a reference on how to implement netmap support in a driver.
331  * Additional comments are in ixgbe_netmap.h .
332  *
333  * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
334  * that extend the standard driver.
335  */
336 #include <dev/netmap/ixgbe_netmap.h>
337 #endif /* DEV_NETMAP */
338
339 /*********************************************************************
340  *  Device identification routine
341  *
342  *  ixgbe_probe determines whether the driver should be loaded on
343  *  an adapter, based on the PCI vendor/device id of the adapter.
344  *
345  *  return BUS_PROBE_DEFAULT on success, positive on failure
346  *********************************************************************/
347
348 static int
349 ixgbe_probe(device_t dev)
350 {
351         ixgbe_vendor_info_t *ent;
352
353         u16     pci_vendor_id = 0;
354         u16     pci_device_id = 0;
355         u16     pci_subvendor_id = 0;
356         u16     pci_subdevice_id = 0;
357         char    adapter_name[256];
358
359         INIT_DEBUGOUT("ixgbe_probe: begin");
360
361         pci_vendor_id = pci_get_vendor(dev);
362         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
363                 return (ENXIO);
364
365         pci_device_id = pci_get_device(dev);
366         pci_subvendor_id = pci_get_subvendor(dev);
367         pci_subdevice_id = pci_get_subdevice(dev);
368
369         ent = ixgbe_vendor_info_array;
370         while (ent->vendor_id != 0) {
371                 if ((pci_vendor_id == ent->vendor_id) &&
372                     (pci_device_id == ent->device_id) &&
373
374                     ((pci_subvendor_id == ent->subvendor_id) ||
375                      (ent->subvendor_id == 0)) &&
376
377                     ((pci_subdevice_id == ent->subdevice_id) ||
378                      (ent->subdevice_id == 0))) {
379                         sprintf(adapter_name, "%s, Version - %s",
380                                 ixgbe_strings[ent->index],
381                                 ixgbe_driver_version);
382                         device_set_desc_copy(dev, adapter_name);
383                         ++ixgbe_total_ports;
384                         return (BUS_PROBE_DEFAULT);
385                 }
386                 ent++;
387         }
388         return (ENXIO);
389 }
390
391 /*********************************************************************
392  *  Device initialization routine
393  *
394  *  The attach entry point is called when the driver is being loaded.
395  *  This routine identifies the type of hardware, allocates all resources
396  *  and initializes the hardware.
397  *
398  *  return 0 on success, positive on failure
399  *********************************************************************/
400
401 static int
402 ixgbe_attach(device_t dev)
403 {
404         struct adapter *adapter;
405         struct ixgbe_hw *hw;
406         int             error = 0;
407         u16             csum;
408         u32             ctrl_ext;
409
410         INIT_DEBUGOUT("ixgbe_attach: begin");
411
412         /* Allocate, clear, and link in our adapter structure */
413         adapter = device_get_softc(dev);
414         adapter->dev = adapter->osdep.dev = dev;
415         hw = &adapter->hw;
416
417         /* Core Lock Init*/
418         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
419
420         /* Set up the timer callout */
421         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
422
423         /* Determine hardware revision */
424         ixgbe_identify_hardware(adapter);
425
426         /* Do base PCI setup - map BAR0 */
427         if (ixgbe_allocate_pci_resources(adapter)) {
428                 device_printf(dev, "Allocation of PCI resources failed\n");
429                 error = ENXIO;
430                 goto err_out;
431         }
432
433         /* Do descriptor calc and sanity checks */
434         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
435             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
436                 device_printf(dev, "TXD config issue, using default!\n");
437                 adapter->num_tx_desc = DEFAULT_TXD;
438         } else
439                 adapter->num_tx_desc = ixgbe_txd;
440
441         /*
442         ** With many RX rings it is easy to exceed the
443         ** system mbuf allocation. Tuning nmbclusters
444         ** can alleviate this.
445         */
446         if (nmbclusters > 0) {
447                 int s;
448                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
449                 if (s > nmbclusters) {
450                         device_printf(dev, "RX Descriptors exceed "
451                             "system mbuf max, using default instead!\n");
452                         ixgbe_rxd = DEFAULT_RXD;
453                 }
454         }
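        /*
        ** Worked example (illustrative, not from the original source): two ports
        ** with 8 queues each and hw.ix.rxd=4096 would need 2 * 8 * 4096 = 65536
        ** clusters for the RX rings alone; if that exceeds kern.ipc.nmbclusters
        ** the check above falls back to DEFAULT_RXD, so raising nmbclusters in
        ** /boot/loader.conf is the usual remedy for such configurations.
        */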
455
456         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
457             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
458                 device_printf(dev, "RXD config issue, using default!\n");
459                 adapter->num_rx_desc = DEFAULT_RXD;
460         } else
461                 adapter->num_rx_desc = ixgbe_rxd;
462
463         /* Allocate our TX/RX Queues */
464         if (ixgbe_allocate_queues(adapter)) {
465                 error = ENOMEM;
466                 goto err_out;
467         }
468
469         /* Allocate multicast array memory. */
470         adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
471             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
472         if (adapter->mta == NULL) {
473                 device_printf(dev, "Can not allocate multicast setup array\n");
474                 error = ENOMEM;
475                 goto err_late;
476         }
477
478         /* Initialize the shared code */
479         hw->allow_unsupported_sfp = allow_unsupported_sfp;
480         error = ixgbe_init_shared_code(hw);
481         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
482                 /*
483                 ** No optics in this port, set up
484                 ** so the timer routine will probe 
485                 ** for later insertion.
486                 */
487                 adapter->sfp_probe = TRUE;
488                 error = 0;
489         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
490                 device_printf(dev,"Unsupported SFP+ module detected!\n");
491                 error = EIO;
492                 goto err_late;
493         } else if (error) {
494                 device_printf(dev,"Unable to initialize the shared code\n");
495                 error = EIO;
496                 goto err_late;
497         }
498
499         /* Make sure we have a good EEPROM before we read from it */
500         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
501                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
502                 error = EIO;
503                 goto err_late;
504         }
505
506         error = ixgbe_init_hw(hw);
507         switch (error) {
508         case IXGBE_ERR_EEPROM_VERSION:
509                 device_printf(dev, "This device is a pre-production adapter/"
510                     "LOM.  Please be aware there may be issues associated "
511                     "with your hardware.\n If you are experiencing problems "
512                     "please contact your Intel or hardware representative "
513                     "who provided you with this hardware.\n");
514                 break;
515         case IXGBE_ERR_SFP_NOT_SUPPORTED:
516                 device_printf(dev,"Unsupported SFP+ Module\n");
517                 error = EIO;
518                 goto err_late;
519         case IXGBE_ERR_SFP_NOT_PRESENT:
520                 device_printf(dev,"No SFP+ Module found\n");
521                 /* falls thru */
522         default:
523                 break;
524         }
525
526         /* Detect and set physical type */
527         ixgbe_setup_optics(adapter);
528
529         if ((adapter->msix > 1) && (ixgbe_enable_msix))
530                 error = ixgbe_allocate_msix(adapter); 
531         else
532                 error = ixgbe_allocate_legacy(adapter); 
533         if (error) 
534                 goto err_late;
535
536         /* Setup OS specific network interface */
537         if (ixgbe_setup_interface(dev, adapter) != 0)
538                 goto err_late;
539
540         /* Initialize statistics */
541         ixgbe_update_stats_counters(adapter);
542
543         /* Register for VLAN events */
544         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
545             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
546         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
547             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
548
549         /* Check PCIE slot type/speed/width */
550         ixgbe_get_slot_info(hw);
551
552         /* Set an initial default flow control value */
553         adapter->fc = ixgbe_fc_full;
554
555         /* Check for certain supported features */
556         ixgbe_check_wol_support(adapter);
557         ixgbe_check_eee_support(adapter);
558
559         /* Add sysctls */
560         ixgbe_add_device_sysctls(adapter);
561         ixgbe_add_hw_stats(adapter);
562
563         /* let hardware know driver is loaded */
564         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
565         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
566         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
567
568 #ifdef DEV_NETMAP
569         ixgbe_netmap_attach(adapter);
570 #endif /* DEV_NETMAP */
571         INIT_DEBUGOUT("ixgbe_attach: end");
572         return (0);
573
574 err_late:
575         ixgbe_free_transmit_structures(adapter);
576         ixgbe_free_receive_structures(adapter);
577 err_out:
578         if (adapter->ifp != NULL)
579                 if_free(adapter->ifp);
580         ixgbe_free_pci_resources(adapter);
581         free(adapter->mta, M_DEVBUF);
582         return (error);
583 }
584
585 /*********************************************************************
586  *  Device removal routine
587  *
588  *  The detach entry point is called when the driver is being removed.
589  *  This routine stops the adapter and deallocates all the resources
590  *  that were allocated for driver operation.
591  *
592  *  return 0 on success, positive on failure
593  *********************************************************************/
594
595 static int
596 ixgbe_detach(device_t dev)
597 {
598         struct adapter *adapter = device_get_softc(dev);
599         struct ix_queue *que = adapter->queues;
600         struct tx_ring *txr = adapter->tx_rings;
601         u32     ctrl_ext;
602
603         INIT_DEBUGOUT("ixgbe_detach: begin");
604
605         /* Make sure VLANS are not using driver */
606         if (adapter->ifp->if_vlantrunk != NULL) {
607                 device_printf(dev,"Vlan in use, detach first\n");
608                 return (EBUSY);
609         }
610
611         /* Stop the adapter */
612         IXGBE_CORE_LOCK(adapter);
613         ixgbe_setup_low_power_mode(adapter);
614         IXGBE_CORE_UNLOCK(adapter);
615
616         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
617                 if (que->tq) {
618 #ifndef IXGBE_LEGACY_TX
619                         taskqueue_drain(que->tq, &txr->txq_task);
620 #endif
621                         taskqueue_drain(que->tq, &que->que_task);
622                         taskqueue_free(que->tq);
623                 }
624         }
625
626         /* Drain the Link queue */
627         if (adapter->tq) {
628                 taskqueue_drain(adapter->tq, &adapter->link_task);
629                 taskqueue_drain(adapter->tq, &adapter->mod_task);
630                 taskqueue_drain(adapter->tq, &adapter->msf_task);
631                 taskqueue_drain(adapter->tq, &adapter->phy_task);
632 #ifdef IXGBE_FDIR
633                 taskqueue_drain(adapter->tq, &adapter->fdir_task);
634 #endif
635                 taskqueue_free(adapter->tq);
636         }
637
638         /* let hardware know driver is unloading */
639         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
640         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
641         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
642
643         /* Unregister VLAN events */
644         if (adapter->vlan_attach != NULL)
645                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
646         if (adapter->vlan_detach != NULL)
647                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
648
649         ether_ifdetach(adapter->ifp);
650         callout_drain(&adapter->timer);
651 #ifdef DEV_NETMAP
652         netmap_detach(adapter->ifp);
653 #endif /* DEV_NETMAP */
654         ixgbe_free_pci_resources(adapter);
655         bus_generic_detach(dev);
656         if_free(adapter->ifp);
657
658         ixgbe_free_transmit_structures(adapter);
659         ixgbe_free_receive_structures(adapter);
660         free(adapter->mta, M_DEVBUF);
661
662         IXGBE_CORE_LOCK_DESTROY(adapter);
663         return (0);
664 }
665
666 /*********************************************************************
667  *
668  *  Shutdown entry point
669  *
670  **********************************************************************/
671
672 static int
673 ixgbe_shutdown(device_t dev)
674 {
675         struct adapter *adapter = device_get_softc(dev);
676         int error = 0;
677
678         INIT_DEBUGOUT("ixgbe_shutdown: begin");
679
680         IXGBE_CORE_LOCK(adapter);
681         error = ixgbe_setup_low_power_mode(adapter);
682         IXGBE_CORE_UNLOCK(adapter);
683
684         return (error);
685 }
686
687 /**
688  * Methods for going from:
689  * D0 -> D3: ixgbe_suspend
690  * D3 -> D0: ixgbe_resume
691  */
692 static int
693 ixgbe_suspend(device_t dev)
694 {
695         struct adapter *adapter = device_get_softc(dev);
696         int error = 0;
697
698         INIT_DEBUGOUT("ixgbe_suspend: begin");
699
700         IXGBE_CORE_LOCK(adapter);
701
702         error = ixgbe_setup_low_power_mode(adapter);
703
704         /* Save state and power down */
705         pci_save_state(dev);
706         pci_set_powerstate(dev, PCI_POWERSTATE_D3);
707
708         IXGBE_CORE_UNLOCK(adapter);
709
710         return (error);
711 }
712
713 static int
714 ixgbe_resume(device_t dev)
715 {
716         struct adapter *adapter = device_get_softc(dev);
717         struct ifnet *ifp = adapter->ifp;
718         struct ixgbe_hw *hw = &adapter->hw;
719         u32 wus;
720
721         INIT_DEBUGOUT("ixgbe_resume: begin");
722
723         IXGBE_CORE_LOCK(adapter);
724
725         pci_set_powerstate(dev, PCI_POWERSTATE_D0);
726         pci_restore_state(dev);
727
728         /* Read & clear WUS register */
729         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
730         if (wus)
731                 device_printf(dev, "Woken up by (WUS): %#010x\n",
732                     IXGBE_READ_REG(hw, IXGBE_WUS));
733         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
734         /* And clear WUFC until next low-power transition */
735         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
736
737         /*
738          * Required after D3->D0 transition;
739          * will re-advertise all previous advertised speeds
740          */
741         if (ifp->if_flags & IFF_UP)
742                 ixgbe_init_locked(adapter);
743
744         IXGBE_CORE_UNLOCK(adapter);
745
746         INIT_DEBUGOUT("ixgbe_resume: end");
747         return (0);
748 }
749
750
751 /*********************************************************************
752  *  Ioctl entry point
753  *
754  *  ixgbe_ioctl is called when the user wants to configure the
755  *  interface.
756  *
757  *  return 0 on success, positive on failure
758  **********************************************************************/
759
760 static int
761 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
762 {
763         struct adapter  *adapter = ifp->if_softc;
764         struct ifreq    *ifr = (struct ifreq *) data;
765 #if defined(INET) || defined(INET6)
766         struct ifaddr *ifa = (struct ifaddr *)data;
767         bool            avoid_reset = FALSE;
768 #endif
769         int             error = 0;
770
771         switch (command) {
772
773         case SIOCSIFADDR:
774 #ifdef INET
775                 if (ifa->ifa_addr->sa_family == AF_INET)
776                         avoid_reset = TRUE;
777 #endif
778 #ifdef INET6
779                 if (ifa->ifa_addr->sa_family == AF_INET6)
780                         avoid_reset = TRUE;
781 #endif
782 #if defined(INET) || defined(INET6)
783                 /*
784                 ** Calling init results in link renegotiation,
785                 ** so we avoid doing it when possible.
786                 */
787                 if (avoid_reset) {
788                         ifp->if_flags |= IFF_UP;
789                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
790                                 ixgbe_init(adapter);
791                         if (!(ifp->if_flags & IFF_NOARP))
792                                 arp_ifinit(ifp, ifa);
793                 } else
794                         error = ether_ioctl(ifp, command, data);
795 #endif
796                 break;
797         case SIOCSIFMTU:
798                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
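                /*
                 * Illustrative path (added note): a userland "ifconfig <ifname> mtu 9000"
                 * arrives here; max_frame_size becomes the new MTU plus the Ethernet
                 * header/CRC overhead and the rings are rebuilt by the re-init below.
                 */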
799                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
800                         error = EINVAL;
801                 } else {
802                         IXGBE_CORE_LOCK(adapter);
803                         ifp->if_mtu = ifr->ifr_mtu;
804                         adapter->max_frame_size =
805                                 ifp->if_mtu + IXGBE_MTU_HDR;
806                         ixgbe_init_locked(adapter);
807                         IXGBE_CORE_UNLOCK(adapter);
808                 }
809                 break;
810         case SIOCSIFFLAGS:
811                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
812                 IXGBE_CORE_LOCK(adapter);
813                 if (ifp->if_flags & IFF_UP) {
814                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
815                                 if ((ifp->if_flags ^ adapter->if_flags) &
816                                     (IFF_PROMISC | IFF_ALLMULTI)) {
817                                         ixgbe_set_promisc(adapter);
818                                 }
819                         } else
820                                 ixgbe_init_locked(adapter);
821                 } else
822                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
823                                 ixgbe_stop(adapter);
824                 adapter->if_flags = ifp->if_flags;
825                 IXGBE_CORE_UNLOCK(adapter);
826                 break;
827         case SIOCADDMULTI:
828         case SIOCDELMULTI:
829                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
830                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
831                         IXGBE_CORE_LOCK(adapter);
832                         ixgbe_disable_intr(adapter);
833                         ixgbe_set_multi(adapter);
834                         ixgbe_enable_intr(adapter);
835                         IXGBE_CORE_UNLOCK(adapter);
836                 }
837                 break;
838         case SIOCSIFMEDIA:
839         case SIOCGIFMEDIA:
840                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
841                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
842                 break;
843         case SIOCSIFCAP:
844         {
845                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
846                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
847                 if (mask & IFCAP_HWCSUM)
848                         ifp->if_capenable ^= IFCAP_HWCSUM;
849                 if (mask & IFCAP_TSO4)
850                         ifp->if_capenable ^= IFCAP_TSO4;
851                 if (mask & IFCAP_TSO6)
852                         ifp->if_capenable ^= IFCAP_TSO6;
853                 if (mask & IFCAP_LRO)
854                         ifp->if_capenable ^= IFCAP_LRO;
855                 if (mask & IFCAP_VLAN_HWTAGGING)
856                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
857                 if (mask & IFCAP_VLAN_HWFILTER)
858                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
859                 if (mask & IFCAP_VLAN_HWTSO)
860                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
861                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
862                         IXGBE_CORE_LOCK(adapter);
863                         ixgbe_init_locked(adapter);
864                         IXGBE_CORE_UNLOCK(adapter);
865                 }
866                 VLAN_CAPABILITIES(ifp);
867                 break;
868         }
869 #if __FreeBSD_version >= 1100036
870         case SIOCGI2C:
871         {
872                 struct ixgbe_hw *hw = &adapter->hw;
873                 struct ifi2creq i2c;
874                 int i;
875                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
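                /*
                 * Added note (assumed usage): this is how userland tools such as
                 * ifconfig's verbose SFP output read the pluggable module's EEPROM
                 * and diagnostic pages at I2C addresses 0xA0/0xA2 via the PHY ops.
                 */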
876                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
877                 if (error != 0)
878                         break;
879                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
880                         error = EINVAL;
881                         break;
882                 }
883                 if (i2c.len > sizeof(i2c.data)) {
884                         error = EINVAL;
885                         break;
886                 }
887
888                 for (i = 0; i < i2c.len; i++)
889                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
890                             i2c.dev_addr, &i2c.data[i]);
891                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
892                 break;
893         }
894 #endif
895         default:
896                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
897                 error = ether_ioctl(ifp, command, data);
898                 break;
899         }
900
901         return (error);
902 }
903
904 /*********************************************************************
905  *  Init entry point
906  *
907  *  This routine is used in two ways. It is used by the stack as
908  *  init entry point in network interface structure. It is also used
909  *  by the driver as a hw/sw initialization routine to get to a
910  *  consistent state.
911  *
912  *  return 0 on success, positive on failure
913  **********************************************************************/
914 #define IXGBE_MHADD_MFS_SHIFT 16
915
916 static void
917 ixgbe_init_locked(struct adapter *adapter)
918 {
919         struct ifnet   *ifp = adapter->ifp;
920         device_t        dev = adapter->dev;
921         struct ixgbe_hw *hw = &adapter->hw;
922         u32             k, txdctl, mhadd, gpie;
923         u32             rxdctl, rxctrl;
924
925         mtx_assert(&adapter->core_mtx, MA_OWNED);
926         INIT_DEBUGOUT("ixgbe_init_locked: begin");
927         hw->adapter_stopped = FALSE;
928         ixgbe_stop_adapter(hw);
929         callout_stop(&adapter->timer);
930
931         /* reprogram the RAR[0] in case user changed it. */
932         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
933
934         /* Get the latest mac address, User can use a LAA */
935         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
936               IXGBE_ETH_LENGTH_OF_ADDRESS);
937         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
938         hw->addr_ctrl.rar_used_count = 1;
939
940         /* Set the various hardware offload abilities */
941         ifp->if_hwassist = 0;
942         if (ifp->if_capenable & IFCAP_TSO)
943                 ifp->if_hwassist |= CSUM_TSO;
944         if (ifp->if_capenable & IFCAP_TXCSUM) {
945                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
946 #if __FreeBSD_version >= 800000
947                 if (hw->mac.type != ixgbe_mac_82598EB)
948                         ifp->if_hwassist |= CSUM_SCTP;
949 #endif
950         }
951
952         /* Prepare transmit descriptors and buffers */
953         if (ixgbe_setup_transmit_structures(adapter)) {
954                 device_printf(dev, "Could not setup transmit structures\n");
955                 ixgbe_stop(adapter);
956                 return;
957         }
958
959         ixgbe_init_hw(hw);
960         ixgbe_initialize_transmit_units(adapter);
961
962         /* Setup Multicast table */
963         ixgbe_set_multi(adapter);
964
965         /*
966         ** Determine the correct mbuf pool
967         ** for doing jumbo frames
968         */
969         if (adapter->max_frame_size <= 2048)
970                 adapter->rx_mbuf_sz = MCLBYTES;
971         else if (adapter->max_frame_size <= 4096)
972                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
973         else if (adapter->max_frame_size <= 9216)
974                 adapter->rx_mbuf_sz = MJUM9BYTES;
975         else
976                 adapter->rx_mbuf_sz = MJUM16BYTES;
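        /*
        ** Illustrative mapping (added note): a standard 1500-byte MTU
        ** (max_frame_size ~1518) selects 2KB MCLBYTES clusters, while a
        ** 9000-byte jumbo MTU selects 9KB MJUM9BYTES clusters, so a maximum
        ** sized frame still fits in a single receive buffer.
        */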
977
978         /* Prepare receive descriptors and buffers */
979         if (ixgbe_setup_receive_structures(adapter)) {
980                 device_printf(dev, "Could not setup receive structures\n");
981                 ixgbe_stop(adapter);
982                 return;
983         }
984
985         /* Configure RX settings */
986         ixgbe_initialize_receive_units(adapter);
987
988         gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
989
990         /* Enable Fan Failure Interrupt */
991         gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
992
993         /* Add for Module detection */
994         if (hw->mac.type == ixgbe_mac_82599EB)
995                 gpie |= IXGBE_SDP2_GPIEN;
996
997         /*
998          * Thermal Failure Detection (X540)
999          * Link Detection (X552)
1000          */
1001         if (hw->mac.type == ixgbe_mac_X540 ||
1002             hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1003             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1004                 gpie |= IXGBE_SDP0_GPIEN_X540;
1005
1006         if (adapter->msix > 1) {
1007                 /* Enable Enhanced MSIX mode */
1008                 gpie |= IXGBE_GPIE_MSIX_MODE;
1009                 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1010                     IXGBE_GPIE_OCD;
1011         }
1012         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1013
1014         /* Set MTU size */
1015         if (ifp->if_mtu > ETHERMTU) {
1016                 /* aka IXGBE_MAXFRS on 82599 and newer */
1017                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1018                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1019                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1020                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1021         }
1022         
1023         /* Now enable all the queues */
1024         for (int i = 0; i < adapter->num_queues; i++) {
1025                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1026                 txdctl |= IXGBE_TXDCTL_ENABLE;
1027                 /* Set WTHRESH to 8, burst writeback */
1028                 txdctl |= (8 << 16);
1029                 /*
1030                  * When the internal queue falls below PTHRESH (32),
1031                  * start prefetching as long as there are at least
1032                  * HTHRESH (1) buffers ready. The values are taken
1033                  * from the Intel linux driver 3.8.21.
1034                  * Prefetching enables tx line rate even with 1 queue.
1035                  */
1036                 txdctl |= (32 << 0) | (1 << 8);
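                /*
                 * For reference (assumed register layout): TXDCTL carries PTHRESH
                 * in bits 6:0, HTHRESH in bits 14:8 and WTHRESH in bits 22:16, so
                 * together the two writes above program PTHRESH=32, HTHRESH=1 and
                 * WTHRESH=8 for this queue.
                 */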
1037                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1038         }
1039
1040         for (int i = 0; i < adapter->num_queues; i++) {
1041                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1042                 if (hw->mac.type == ixgbe_mac_82598EB) {
1043                         /*
1044                         ** PTHRESH = 21
1045                         ** HTHRESH = 4
1046                         ** WTHRESH = 8
1047                         */
1048                         rxdctl &= ~0x3FFFFF;
1049                         rxdctl |= 0x080420;
1050                 }
1051                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1052                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1053                 for (k = 0; k < 10; k++) {
1054                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1055                             IXGBE_RXDCTL_ENABLE)
1056                                 break;
1057                         else
1058                                 msec_delay(1);
1059                 }
1060                 wmb();
1061 #ifdef DEV_NETMAP
1062                 /*
1063                  * In netmap mode, we must preserve the buffers made
1064                  * available to userspace before the if_init()
1065                  * (this is true by default on the TX side, because
1066                  * init makes all buffers available to userspace).
1067                  *
1068                  * netmap_reset() and the device specific routines
1069                  * (e.g. ixgbe_setup_receive_rings()) map these
1070                  * buffers at the end of the NIC ring, so here we
1071                  * must set the RDT (tail) register to make sure
1072                  * they are not overwritten.
1073                  *
1074                  * In this driver the NIC ring starts at RDH = 0,
1075                  * RDT points to the last slot available for reception (?),
1076                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1077                  */
1078                 if (ifp->if_capenable & IFCAP_NETMAP) {
1079                         struct netmap_adapter *na = NA(adapter->ifp);
1080                         struct netmap_kring *kring = &na->rx_rings[i];
1081                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1082
1083                         IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1084                 } else
1085 #endif /* DEV_NETMAP */
1086                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1087         }
1088
1089         /* Enable Receive engine */
1090         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1091         if (hw->mac.type == ixgbe_mac_82598EB)
1092                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1093         rxctrl |= IXGBE_RXCTRL_RXEN;
1094         ixgbe_enable_rx_dma(hw, rxctrl);
1095
1096         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1097
1098         /* Set up MSI/X routing */
1099         if (ixgbe_enable_msix)  {
1100                 ixgbe_configure_ivars(adapter);
1101                 /* Set up auto-mask */
1102                 if (hw->mac.type == ixgbe_mac_82598EB)
1103                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1104                 else {
1105                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1106                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1107                 }
1108         } else {  /* Simple settings for Legacy/MSI */
1109                 ixgbe_set_ivar(adapter, 0, 0, 0);
1110                 ixgbe_set_ivar(adapter, 0, 0, 1);
1111                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1112         }
1113
1114 #ifdef IXGBE_FDIR
1115         /* Init Flow director */
1116         if (hw->mac.type != ixgbe_mac_82598EB) {
1117                 u32 hdrm = 32 << fdir_pballoc;
1118
1119                 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1120                 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1121         }
1122 #endif
1123
1124         /*
1125         ** Check on any SFP devices that
1126         ** need to be kick-started
1127         */
1128         if (hw->phy.type == ixgbe_phy_none) {
1129                 int err = hw->phy.ops.identify(hw);
1130                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1131                         device_printf(dev,
1132                             "Unsupported SFP+ module type was detected.\n");
1133                         return;
1134                 }
1135         }
1136
1137         /* Set moderation on the Link interrupt */
1138         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1139
1140         /* Configure Energy Efficient Ethernet for supported devices */
1141         if (adapter->eee_support)
1142                 ixgbe_setup_eee(hw, adapter->eee_enabled);
1143
1144         /* Config/Enable Link */
1145         ixgbe_config_link(adapter);
1146
1147         /* Hardware Packet Buffer & Flow Control setup */
1148         ixgbe_config_delay_values(adapter);
1149
1150         /* Initialize the FC settings */
1151         ixgbe_start_hw(hw);
1152
1153         /* Set up VLAN support and filter */
1154         ixgbe_setup_vlan_hw_support(adapter);
1155
1156         /* Setup DMA Coalescing */
1157         ixgbe_config_dmac(adapter);
1158
1159         /* And now turn on interrupts */
1160         ixgbe_enable_intr(adapter);
1161
1162         /* Now inform the stack we're ready */
1163         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1164
1165         return;
1166 }
1167
1168 static void
1169 ixgbe_init(void *arg)
1170 {
1171         struct adapter *adapter = arg;
1172
1173         IXGBE_CORE_LOCK(adapter);
1174         ixgbe_init_locked(adapter);
1175         IXGBE_CORE_UNLOCK(adapter);
1176         return;
1177 }
1178
1179 static void
1180 ixgbe_config_delay_values(struct adapter *adapter)
1181 {
1182         struct ixgbe_hw *hw = &adapter->hw;
1183         u32 rxpb, frame, size, tmp;
1184
1185         frame = adapter->max_frame_size;
1186
1187         /* Calculate High Water */
1188         switch (hw->mac.type) {
1189         case ixgbe_mac_X540:
1190         case ixgbe_mac_X550:
1191         case ixgbe_mac_X550EM_x:
1192                 tmp = IXGBE_DV_X540(frame, frame);
1193                 break;
1194         default:
1195                 tmp = IXGBE_DV(frame, frame);
1196                 break;
1197         }
1198         size = IXGBE_BT2KB(tmp);
1199         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1200         hw->fc.high_water[0] = rxpb - size;
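        /*
        ** Descriptive note (added): the high-water mark is the RX packet buffer
        ** size in KB minus the worst-case delay headroom for one maximum-sized
        ** frame, i.e. the fill level at which XOFF pause frames are generated
        ** when flow control is enabled.
        */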
1201
1202         /* Now calculate Low Water */
1203         switch (hw->mac.type) {
1204         case ixgbe_mac_X540:
1205         case ixgbe_mac_X550:
1206         case ixgbe_mac_X550EM_x:
1207                 tmp = IXGBE_LOW_DV_X540(frame);
1208                 break;
1209         default:
1210                 tmp = IXGBE_LOW_DV(frame);
1211                 break;
1212         }
1213         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1214
1215         hw->fc.requested_mode = adapter->fc;
1216         hw->fc.pause_time = IXGBE_FC_PAUSE;
1217         hw->fc.send_xon = TRUE;
1218 }
1219
1220 /*
1221 **
1222 ** MSIX Interrupt Handlers and Tasklets
1223 **
1224 */
1225
1226 static inline void
1227 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1228 {
1229         struct ixgbe_hw *hw = &adapter->hw;
1230         u64     queue = ((u64)1 << vector);
1231         u32     mask;
1232
1233         if (hw->mac.type == ixgbe_mac_82598EB) {
1234                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1236         } else {
1237                 mask = (queue & 0xFFFFFFFF);
1238                 if (mask)
1239                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240                 mask = (queue >> 32);
1241                 if (mask)
1242                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1243         }
1244 }
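/*
** Added note: on 82599 and newer MACs the per-queue mask bits live in the two
** 32-bit EIMS_EX/EIMC_EX registers, so the 64-bit queue mask is split into low
** and high halves above and in ixgbe_disable_queue() below; the 82598 exposes
** only the single EIMS/EIMC register with its RTX_QUEUE bits.
*/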
1245
1246 static inline void
1247 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1248 {
1249         struct ixgbe_hw *hw = &adapter->hw;
1250         u64     queue = ((u64)1 << vector);
1251         u32     mask;
1252
1253         if (hw->mac.type == ixgbe_mac_82598EB) {
1254                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1256         } else {
1257                 mask = (queue & 0xFFFFFFFF);
1258                 if (mask)
1259                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260                 mask = (queue >> 32);
1261                 if (mask)
1262                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1263         }
1264 }
1265
1266 static void
1267 ixgbe_handle_que(void *context, int pending)
1268 {
1269         struct ix_queue *que = context;
1270         struct adapter  *adapter = que->adapter;
1271         struct tx_ring  *txr = que->txr;
1272         struct ifnet    *ifp = adapter->ifp;
1273         bool            more;
1274
1275         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1276                 more = ixgbe_rxeof(que);
1277                 IXGBE_TX_LOCK(txr);
1278                 ixgbe_txeof(txr);
1279 #ifndef IXGBE_LEGACY_TX
1280                 if (!drbr_empty(ifp, txr->br))
1281                         ixgbe_mq_start_locked(ifp, txr);
1282 #else
1283                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1284                         ixgbe_start_locked(txr, ifp);
1285 #endif
1286                 IXGBE_TX_UNLOCK(txr);
1287         }
1288
1289         /* Reenable this interrupt */
1290         if (que->res != NULL)
1291                 ixgbe_enable_queue(adapter, que->msix);
1292         else
1293                 ixgbe_enable_intr(adapter);
1294         return;
1295 }
1296
1297
1298 /*********************************************************************
1299  *
1300  *  Legacy Interrupt Service routine
1301  *
1302  **********************************************************************/
1303
1304 static void
1305 ixgbe_legacy_irq(void *arg)
1306 {
1307         struct ix_queue *que = arg;
1308         struct adapter  *adapter = que->adapter;
1309         struct ixgbe_hw *hw = &adapter->hw;
1310         struct ifnet    *ifp = adapter->ifp;
1311         struct          tx_ring *txr = adapter->tx_rings;
1312         bool            more;
1313         u32             reg_eicr;
1314
1315
1316         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1317
1318         ++que->irqs;
1319         if (reg_eicr == 0) {
1320                 ixgbe_enable_intr(adapter);
1321                 return;
1322         }
1323
1324         more = ixgbe_rxeof(que);
1325
1326         IXGBE_TX_LOCK(txr);
1327         ixgbe_txeof(txr);
1328 #ifdef IXGBE_LEGACY_TX
1329         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1330                 ixgbe_start_locked(txr, ifp);
1331 #else
1332         if (!drbr_empty(ifp, txr->br))
1333                 ixgbe_mq_start_locked(ifp, txr);
1334 #endif
1335         IXGBE_TX_UNLOCK(txr);
1336
1337         /* Check for fan failure */
1338         if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1339             (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1340                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1341                     "REPLACE IMMEDIATELY!!\n");
1342                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1343         }
1344
1345         /* Link status change */
1346         if (reg_eicr & IXGBE_EICR_LSC)
1347                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1348
1349         /* External PHY interrupt */
1350         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1351             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1352                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1353
1354         if (more)
1355                 taskqueue_enqueue(que->tq, &que->que_task);
1356         else
1357                 ixgbe_enable_intr(adapter);
1358         return;
1359 }
1360
1361
1362 /*********************************************************************
1363  *
1364  *  MSIX Queue Interrupt Service routine
1365  *
1366  **********************************************************************/
1367 void
1368 ixgbe_msix_que(void *arg)
1369 {
1370         struct ix_queue *que = arg;
1371         struct adapter  *adapter = que->adapter;
1372         struct ifnet    *ifp = adapter->ifp;
1373         struct tx_ring  *txr = que->txr;
1374         struct rx_ring  *rxr = que->rxr;
1375         bool            more;
1376         u32             newitr = 0;
1377
1378         /* Protect against spurious interrupts */
1379         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1380                 return;
1381
1382         ixgbe_disable_queue(adapter, que->msix);
1383         ++que->irqs;
1384
1385         more = ixgbe_rxeof(que);
1386
1387         IXGBE_TX_LOCK(txr);
1388         ixgbe_txeof(txr);
1389 #ifdef IXGBE_LEGACY_TX
1390         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1391                 ixgbe_start_locked(txr, ifp);
1392 #else
1393         if (!drbr_empty(ifp, txr->br))
1394                 ixgbe_mq_start_locked(ifp, txr);
1395 #endif
1396         IXGBE_TX_UNLOCK(txr);
1397
1398         /* Do AIM now? */
1399
1400         if (ixgbe_enable_aim == FALSE)
1401                 goto no_calc;
1402         /*
1403         ** Do Adaptive Interrupt Moderation:
1404         **  - Write out last calculated setting
1405         **  - Calculate based on average size over
1406         **    the last interval.
1407         */
1408         if (que->eitr_setting)
1409                 IXGBE_WRITE_REG(&adapter->hw,
1410                     IXGBE_EITR(que->msix), que->eitr_setting);
1411  
1412         que->eitr_setting = 0;
1413
1414         /* Idle, do nothing */
1415         if ((txr->bytes == 0) && (rxr->bytes == 0))
1416                 goto no_calc;
1417                                 
1418         if ((txr->bytes) && (txr->packets))
1419                 newitr = txr->bytes/txr->packets;
1420         if ((rxr->bytes) && (rxr->packets))
1421                 newitr = max(newitr,
1422                     (rxr->bytes / rxr->packets));
1423         newitr += 24; /* account for hardware frame, crc */
1424
1425         /* set an upper boundary */
1426         newitr = min(newitr, 3000);
1427
1428         /* Be nice to the mid range */
1429         if ((newitr > 300) && (newitr < 1200))
1430                 newitr = (newitr / 3);
1431         else
1432                 newitr = (newitr / 2);
1433
1434         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1435                 newitr |= newitr << 16;
1436         else
1437                 newitr |= IXGBE_EITR_CNT_WDIS;
1438                  
1439         /* save for next interrupt */
1440         que->eitr_setting = newitr;
1441
1442         /* Reset state */
1443         txr->bytes = 0;
1444         txr->packets = 0;
1445         rxr->bytes = 0;
1446         rxr->packets = 0;
1447
1448 no_calc:
1449         if (more)
1450                 taskqueue_enqueue(que->tq, &que->que_task);
1451         else
1452                 ixgbe_enable_queue(adapter, que->msix);
1453         return;
1454 }
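
/*
** Illustrative sketch (not part of the upstream driver): a worked
** example of the AIM calculation above, assuming an average RX frame
** of 1024 bytes and an average TX frame of 256 bytes over the last
** interval:
**
**      newitr  = max(256, 1024)        = 1024
**      newitr += 24                    = 1048  (hardware frame, crc)
**      newitr  = min(1048, 3000)       = 1048  (upper boundary)
**      1048 falls in the (300, 1200) mid range, so newitr /= 3 = 349
**
** The result is stored in que->eitr_setting and written to
** IXGBE_EITR(que->msix) at the top of the next interrupt.
*/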
1455
1456
1457 static void
1458 ixgbe_msix_link(void *arg)
1459 {
1460         struct adapter  *adapter = arg;
1461         struct ixgbe_hw *hw = &adapter->hw;
1462         u32             reg_eicr, mod_mask;
1463
1464         ++adapter->link_irq;
1465
1466         /* First get the cause */
1467         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1468         /* Be sure the queue bits are not cleared */
1469         reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1470         /* Clear interrupt with write */
1471         IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1472
1473         /* Link status change */
1474         if (reg_eicr & IXGBE_EICR_LSC)
1475                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1476
1477         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1478 #ifdef IXGBE_FDIR
1479                 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1480                         /* This is probably overkill :) */
1481                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1482                                 return;
1483                         /* Disable the interrupt */
1484                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1485                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1486                 } else
1487 #endif
1488                 if (reg_eicr & IXGBE_EICR_ECC) {
1489                         device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1490                             "Please Reboot!!\n");
1491                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1492                 }
1493
1494                 /* Check for over temp condition */
1495                 if (reg_eicr & IXGBE_EICR_TS) {
1496                         device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1497                             "PHY IS SHUT DOWN!!\n");
1498                         device_printf(adapter->dev, "System shutdown required!\n");
1499                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1500                 }
1501         }
1502
1503         /* Pluggable optics-related interrupt */
1504         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1505                 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1506         else
1507                 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1508
1509         if (ixgbe_is_sfp(hw)) {
1510                 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1511                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1512                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1513                 } else if (reg_eicr & mod_mask) {
1514                         IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1515                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1516                 }
1517         }
1518
1519         /* Check for fan failure */
1520         if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1521             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1522                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1523                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1524                     "REPLACE IMMEDIATELY!!\n");
1525         }
1526
1527         /* External PHY interrupt */
1528         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1529             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1530                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1531                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1532         }
1533
1534         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1535         return;
1536 }
1537
1538 /*********************************************************************
1539  *
1540  *  Media Ioctl callback
1541  *
1542  *  This routine is called whenever the user queries the status of
1543  *  the interface using ifconfig.
1544  *
1545  **********************************************************************/
1546 static void
1547 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1548 {
1549         struct adapter *adapter = ifp->if_softc;
1550         struct ixgbe_hw *hw = &adapter->hw;
1551         int layer;
1552
1553         INIT_DEBUGOUT("ixgbe_media_status: begin");
1554         IXGBE_CORE_LOCK(adapter);
1555         ixgbe_update_link_status(adapter);
1556
1557         ifmr->ifm_status = IFM_AVALID;
1558         ifmr->ifm_active = IFM_ETHER;
1559
1560         if (!adapter->link_active) {
1561                 IXGBE_CORE_UNLOCK(adapter);
1562                 return;
1563         }
1564
1565         ifmr->ifm_status |= IFM_ACTIVE;
1566         layer = ixgbe_get_supported_physical_layer(hw);
1567
1568         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1569             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1570             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1571                 switch (adapter->link_speed) {
1572                 case IXGBE_LINK_SPEED_10GB_FULL:
1573                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1574                         break;
1575                 case IXGBE_LINK_SPEED_1GB_FULL:
1576                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1577                         break;
1578                 case IXGBE_LINK_SPEED_100_FULL:
1579                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1580                         break;
1581                 }
1582         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1583             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1584                 switch (adapter->link_speed) {
1585                 case IXGBE_LINK_SPEED_10GB_FULL:
1586                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1587                         break;
1588                 }
1589         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1590                 switch (adapter->link_speed) {
1591                 case IXGBE_LINK_SPEED_10GB_FULL:
1592                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1593                         break;
1594                 case IXGBE_LINK_SPEED_1GB_FULL:
1595                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1596                         break;
1597                 }
1598         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1599                 switch (adapter->link_speed) {
1600                 case IXGBE_LINK_SPEED_10GB_FULL:
1601                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1602                         break;
1603                 case IXGBE_LINK_SPEED_1GB_FULL:
1604                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1605                         break;
1606                 }
1607         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1608             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1609                 switch (adapter->link_speed) {
1610                 case IXGBE_LINK_SPEED_10GB_FULL:
1611                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1612                         break;
1613                 case IXGBE_LINK_SPEED_1GB_FULL:
1614                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1615                         break;
1616                 }
1617         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1618                 switch (adapter->link_speed) {
1619                 case IXGBE_LINK_SPEED_10GB_FULL:
1620                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1621                         break;
1622                 }
1623         /*
1624         ** XXX: These need to use the proper media types once
1625         ** they're added.
1626         */
1627         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1628                 switch (adapter->link_speed) {
1629                 case IXGBE_LINK_SPEED_10GB_FULL:
1630                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1631                         break;
1632                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1633                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1634                         break;
1635                 case IXGBE_LINK_SPEED_1GB_FULL:
1636                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1637                         break;
1638                 }
1639         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1640             || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1641                 switch (adapter->link_speed) {
1642                 case IXGBE_LINK_SPEED_10GB_FULL:
1643                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1644                         break;
1645                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1646                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1647                         break;
1648                 case IXGBE_LINK_SPEED_1GB_FULL:
1649                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1650                         break;
1651                 }
1652         
1653         /* If nothing is recognized... */
1654         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1655                 ifmr->ifm_active |= IFM_UNKNOWN;
1656         
1657 #if __FreeBSD_version >= 900025
1658         /* Display current flow control setting used on link */
1659         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1660             hw->fc.current_mode == ixgbe_fc_full)
1661                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1662         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1663             hw->fc.current_mode == ixgbe_fc_full)
1664                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1665 #endif
1666
1667         IXGBE_CORE_UNLOCK(adapter);
1668
1669         return;
1670 }
1671
1672 /*********************************************************************
1673  *
1674  *  Media Ioctl callback
1675  *
1676  *  This routine is called when the user changes speed/duplex using
1677  *  media/mediaopt option with ifconfig.
1678  *
1679  **********************************************************************/
1680 static int
1681 ixgbe_media_change(struct ifnet * ifp)
1682 {
1683         struct adapter *adapter = ifp->if_softc;
1684         struct ifmedia *ifm = &adapter->media;
1685         struct ixgbe_hw *hw = &adapter->hw;
1686         ixgbe_link_speed speed = 0;
1687
1688         INIT_DEBUGOUT("ixgbe_media_change: begin");
1689
1690         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1691                 return (EINVAL);
1692
1693         if (hw->phy.media_type == ixgbe_media_type_backplane)
1694                 return (EPERM);
1695
1696         /*
1697         ** We don't actually need to check against the supported
1698         ** media types of the adapter; ifmedia will take care of
1699         ** that for us.
1700         */
1701         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1702                 case IFM_AUTO:
1703                 case IFM_10G_T:
1704                         speed |= IXGBE_LINK_SPEED_100_FULL;
1705                 case IFM_10G_LRM:
1706                 case IFM_10G_SR: /* KR, too */
1707                 case IFM_10G_LR:
1708                 case IFM_10G_CX4: /* KX4 */
1709                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1710                 case IFM_10G_TWINAX:
1711                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
1712                         break;
1713                 case IFM_1000_T:
1714                         speed |= IXGBE_LINK_SPEED_100_FULL;
1715                 case IFM_1000_LX:
1716                 case IFM_1000_SX:
1717                 case IFM_1000_CX: /* KX */
1718                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1719                         break;
1720                 case IFM_100_TX:
1721                         speed |= IXGBE_LINK_SPEED_100_FULL;
1722                         break;
1723                 default:
1724                         goto invalid;
1725         }
1726
1727         hw->mac.autotry_restart = TRUE;
1728         hw->mac.ops.setup_link(hw, speed, TRUE);
1729         adapter->advertise =
1730                 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1731                 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1732                 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1733
1734         return (0);
1735
1736 invalid:
1737         device_printf(adapter->dev, "Invalid media type!\n");
1738         return (EINVAL);
1739 }
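
/*
** Illustrative note (not part of the upstream driver): the switch
** above relies on deliberate case fall-through to accumulate every
** link speed implied by the selected media.  Under that logic:
**
**      IFM_10G_T   -> 100_FULL | 1GB_FULL | 10GB_FULL
**      IFM_10G_SR  ->            1GB_FULL | 10GB_FULL
**      IFM_1000_T  -> 100_FULL | 1GB_FULL
**      IFM_100_TX  -> 100_FULL
**
** so setup_link() is asked to advertise the chosen rate plus the
** lower rates reached by falling through.
*/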
1740
1741 static void
1742 ixgbe_set_promisc(struct adapter *adapter)
1743 {
1744         u_int32_t       reg_rctl;
1745         struct ifnet   *ifp = adapter->ifp;
1746         int             mcnt = 0;
1747
1748         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1749         reg_rctl &= (~IXGBE_FCTRL_UPE);
1750         if (ifp->if_flags & IFF_ALLMULTI)
1751                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1752         else {
1753                 struct  ifmultiaddr *ifma;
1754 #if __FreeBSD_version < 800000
1755                 IF_ADDR_LOCK(ifp);
1756 #else
1757                 if_maddr_rlock(ifp);
1758 #endif
1759                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1760                         if (ifma->ifma_addr->sa_family != AF_LINK)
1761                                 continue;
1762                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1763                                 break;
1764                         mcnt++;
1765                 }
1766 #if __FreeBSD_version < 800000
1767                 IF_ADDR_UNLOCK(ifp);
1768 #else
1769                 if_maddr_runlock(ifp);
1770 #endif
1771         }
1772         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1773                 reg_rctl &= (~IXGBE_FCTRL_MPE);
1774         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1775
1776         if (ifp->if_flags & IFF_PROMISC) {
1777                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1778                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1779         } else if (ifp->if_flags & IFF_ALLMULTI) {
1780                 reg_rctl |= IXGBE_FCTRL_MPE;
1781                 reg_rctl &= ~IXGBE_FCTRL_UPE;
1782                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1783         }
1784         return;
1785 }
1786
1787
1788 /*********************************************************************
1789  *  Multicast Update
1790  *
1791  *  This routine is called whenever multicast address list is updated.
1792  *
1793  **********************************************************************/
1794 #define IXGBE_RAR_ENTRIES 16
1795
1796 static void
1797 ixgbe_set_multi(struct adapter *adapter)
1798 {
1799         u32     fctrl;
1800         u8      *mta;
1801         u8      *update_ptr;
1802         struct  ifmultiaddr *ifma;
1803         int     mcnt = 0;
1804         struct ifnet   *ifp = adapter->ifp;
1805
1806         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1807
1808         mta = adapter->mta;
1809         bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1810             MAX_NUM_MULTICAST_ADDRESSES);
1811
1812 #if __FreeBSD_version < 800000
1813         IF_ADDR_LOCK(ifp);
1814 #else
1815         if_maddr_rlock(ifp);
1816 #endif
1817         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1818                 if (ifma->ifma_addr->sa_family != AF_LINK)
1819                         continue;
1820                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1821                         break;
1822                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1823                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1824                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1825                 mcnt++;
1826         }
1827 #if __FreeBSD_version < 800000
1828         IF_ADDR_UNLOCK(ifp);
1829 #else
1830         if_maddr_runlock(ifp);
1831 #endif
1832
1833         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1834         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1835         if (ifp->if_flags & IFF_PROMISC)
1836                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1837         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1838             ifp->if_flags & IFF_ALLMULTI) {
1839                 fctrl |= IXGBE_FCTRL_MPE;
1840                 fctrl &= ~IXGBE_FCTRL_UPE;
1841         } else
1842                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843         
1844         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1845
1846         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1847                 update_ptr = mta;
1848                 ixgbe_update_mc_addr_list(&adapter->hw,
1849                     update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1850         }
1851
1852         return;
1853 }
1854
1855 /*
1856  * This is an iterator function needed by the multicast
1857  * shared code. It feeds the shared code routine the addresses
1858  * from the mta array built in ixgbe_set_multi(), one at a time.
1859  */
1860 static u8 *
1861 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1862 {
1863         u8 *addr = *update_ptr;
1864         u8 *newptr;
1865         *vmdq = 0;
1866
1867         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1868         *update_ptr = newptr;
1869         return addr;
1870 }
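
/*
** Illustrative sketch (not part of the upstream driver) of how the
** shared code is expected to walk the iterator handed to it from
** ixgbe_set_multi(): one call per address, each call returning the
** current 6-byte entry and advancing the cursor by
** IXGBE_ETH_LENGTH_OF_ADDRESS through the flat mta array.
** The helper below is hypothetical and compiled out.
*/
#if 0
static void
example_walk_mc_list(struct ixgbe_hw *hw, u8 *mta, u32 mcnt)
{
	u8      *cursor = mta;
	u32     vmdq;

	for (u32 i = 0; i < mcnt; i++) {
		u8 *addr = ixgbe_mc_array_itr(hw, &cursor, &vmdq);
		/* addr now points at the i-th multicast MAC address */
		(void)addr;
	}
}
#endif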
1871
1872
1873 /*********************************************************************
1874  *  Timer routine
1875  *
1876  *  This routine checks for link status, updates statistics,
1877  *  and runs the watchdog check.
1878  *
1879  **********************************************************************/
1880
1881 static void
1882 ixgbe_local_timer(void *arg)
1883 {
1884         struct adapter  *adapter = arg;
1885         device_t        dev = adapter->dev;
1886         struct ix_queue *que = adapter->queues;
1887         u64             queues = 0;
1888         int             hung = 0;
1889
1890         mtx_assert(&adapter->core_mtx, MA_OWNED);
1891
1892         /* Check for pluggable optics */
1893         if (adapter->sfp_probe)
1894                 if (!ixgbe_sfp_probe(adapter))
1895                         goto out; /* Nothing to do */
1896
1897         ixgbe_update_link_status(adapter);
1898         ixgbe_update_stats_counters(adapter);
1899
1900         /*
1901         ** Check the TX queues status
1902         **      - mark hung queues so we don't schedule on them
1903         **      - watchdog only if all queues show hung
1904         */          
1905         for (int i = 0; i < adapter->num_queues; i++, que++) {
1906                 /* Keep track of queues with work for soft irq */
1907                 if (que->txr->busy)
1908                         queues |= ((u64)1 << que->me);
1909                 /*
1910                 ** Each time txeof runs without cleaning, while there
1911                 ** are still uncleaned descriptors, it increments busy.
1912                 ** If we reach the MAX we declare the queue hung.
1913                 */
1914                 if (que->busy == IXGBE_QUEUE_HUNG) {
1915                         ++hung;
1916                         /* Mark the queue as inactive */
1917                         adapter->active_queues &= ~((u64)1 << que->me);
1918                         continue;
1919                 } else {
1920                         /* Check if we've come back from hung */
1921                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1922                                 adapter->active_queues |= ((u64)1 << que->me);
1923                 }
1924                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1925                         device_printf(dev,"Warning queue %d "
1926                             "appears to be hung!\n", i);
1927                         que->txr->busy = IXGBE_QUEUE_HUNG;
1928                         ++hung;
1929                 }
1930
1931         }
1932
1933         /* Only truly watchdog if all queues show hung */
1934         if (hung == adapter->num_queues)
1935                 goto watchdog;
1936         else if (queues != 0) { /* Force an IRQ on queues with work */
1937                 ixgbe_rearm_queues(adapter, queues);
1938         }
1939
1940 out:
1941         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1942         return;
1943
1944 watchdog:
1945         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1946         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1947         adapter->watchdog_events++;
1948         ixgbe_init_locked(adapter);
1949 }
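
/*
** Illustrative note (not part of the upstream driver): the 'queues'
** bookkeeping above is a per-queue bitmask.  With four queues where
** only queue 0 and queue 2 still have busy TX work, the loop builds
**
**      queues = ((u64)1 << 0) | ((u64)1 << 2) = 0x5
**
** and, provided not every queue looks hung, ixgbe_rearm_queues() is
** handed that mask so only those two queues are forced to take
** another interrupt.
*/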
1950
1951 /*
1952 ** Note: this routine updates the OS on the link state;
1953 **      the real check of the hardware only happens with
1954 **      a link interrupt.
1955 */
1956 static void
1957 ixgbe_update_link_status(struct adapter *adapter)
1958 {
1959         struct ifnet    *ifp = adapter->ifp;
1960         device_t dev = adapter->dev;
1961
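        /*
        ** The magic 128 below is IXGBE_LINK_SPEED_10GB_FULL (0x80);
        ** any other reported speed is printed as 1 Gbps.
        */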
1962         if (adapter->link_up){ 
1963                 if (adapter->link_active == FALSE) {
1964                         if (bootverbose)
1965                                 device_printf(dev,"Link is up %d Gbps %s \n",
1966                                     ((adapter->link_speed == 128)? 10:1),
1967                                     "Full Duplex");
1968                         adapter->link_active = TRUE;
1969                         /* Update any Flow Control changes */
1970                         ixgbe_fc_enable(&adapter->hw);
1971                         /* Update DMA coalescing config */
1972                         ixgbe_config_dmac(adapter);
1973                         if_link_state_change(ifp, LINK_STATE_UP);
1974                 }
1975         } else { /* Link down */
1976                 if (adapter->link_active == TRUE) {
1977                         if (bootverbose)
1978                                 device_printf(dev,"Link is Down\n");
1979                         if_link_state_change(ifp, LINK_STATE_DOWN);
1980                         adapter->link_active = FALSE;
1981                 }
1982         }
1983
1984         return;
1985 }
1986
1987
1988 /*********************************************************************
1989  *
1990  *  This routine disables all traffic on the adapter by issuing a
1991  *  global reset on the MAC and deallocates TX/RX buffers.
1992  *
1993  **********************************************************************/
1994
1995 static void
1996 ixgbe_stop(void *arg)
1997 {
1998         struct ifnet   *ifp;
1999         struct adapter *adapter = arg;
2000         struct ixgbe_hw *hw = &adapter->hw;
2001         ifp = adapter->ifp;
2002
2003         mtx_assert(&adapter->core_mtx, MA_OWNED);
2004
2005         INIT_DEBUGOUT("ixgbe_stop: begin\n");
2006         ixgbe_disable_intr(adapter);
2007         callout_stop(&adapter->timer);
2008
2009         /* Let the stack know...*/
2010         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2011
2012         ixgbe_reset_hw(hw);
2013         hw->adapter_stopped = FALSE;
2014         ixgbe_stop_adapter(hw);
2015         if (hw->mac.type == ixgbe_mac_82599EB)
2016                 ixgbe_stop_mac_link_on_d3_82599(hw);
2017         /* Turn off the laser - noop with no optics */
2018         ixgbe_disable_tx_laser(hw);
2019
2020         /* Update the stack */
2021         adapter->link_up = FALSE;
2022         ixgbe_update_link_status(adapter);
2023
2024         /* reprogram the RAR[0] in case user changed it. */
2025         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2026
2027         return;
2028 }
2029
2030
2031 /*********************************************************************
2032  *
2033  *  Determine hardware revision.
2034  *
2035  **********************************************************************/
2036 static void
2037 ixgbe_identify_hardware(struct adapter *adapter)
2038 {
2039         device_t        dev = adapter->dev;
2040         struct ixgbe_hw *hw = &adapter->hw;
2041
2042         /* Save off the information about this board */
2043         hw->vendor_id = pci_get_vendor(dev);
2044         hw->device_id = pci_get_device(dev);
2045         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2046         hw->subsystem_vendor_id =
2047             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2048         hw->subsystem_device_id =
2049             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2050
2051         /*
2052         ** Make sure BUSMASTER is set
2053         */
2054         pci_enable_busmaster(dev);
2055
2056         /* We need this here to set the num_segs below */
2057         ixgbe_set_mac_type(hw);
2058
2059         /* Pick up the 82599 settings */
2060         if (hw->mac.type != ixgbe_mac_82598EB) {
2061                 hw->phy.smart_speed = ixgbe_smart_speed;
2062                 adapter->num_segs = IXGBE_82599_SCATTER;
2063         } else
2064                 adapter->num_segs = IXGBE_82598_SCATTER;
2065
2066         return;
2067 }
2068
2069 /*********************************************************************
2070  *
2071  *  Determine optic type
2072  *
2073  **********************************************************************/
2074 static void
2075 ixgbe_setup_optics(struct adapter *adapter)
2076 {
2077         struct ixgbe_hw *hw = &adapter->hw;
2078         int             layer;
2079
2080         layer = ixgbe_get_supported_physical_layer(hw);
2081
2082         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2083                 adapter->optics = IFM_10G_T;
2084                 return;
2085         }
2086
2087         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2088                 adapter->optics = IFM_1000_T;
2089                 return;
2090         }
2091
2092         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2093                 adapter->optics = IFM_1000_SX;
2094                 return;
2095         }
2096
2097         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2098             IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2099                 adapter->optics = IFM_10G_LR;
2100                 return;
2101         }
2102
2103         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2104                 adapter->optics = IFM_10G_SR;
2105                 return;
2106         }
2107
2108         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2109                 adapter->optics = IFM_10G_TWINAX;
2110                 return;
2111         }
2112
2113         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2114             IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2115                 adapter->optics = IFM_10G_CX4;
2116                 return;
2117         }
2118
2119         /* If we get here just set the default */
2120         adapter->optics = IFM_ETHER | IFM_AUTO;
2121         return;
2122 }
2123
2124 /*********************************************************************
2125  *
2126  *  Setup the Legacy or MSI Interrupt handler
2127  *
2128  **********************************************************************/
2129 static int
2130 ixgbe_allocate_legacy(struct adapter *adapter)
2131 {
2132         device_t        dev = adapter->dev;
2133         struct          ix_queue *que = adapter->queues;
2134 #ifndef IXGBE_LEGACY_TX
2135         struct tx_ring          *txr = adapter->tx_rings;
2136 #endif
2137         int             error, rid = 0;
2138
2139         /* MSI RID at 1 */
2140         if (adapter->msix == 1)
2141                 rid = 1;
2142
2143         /* We allocate a single interrupt resource */
2144         adapter->res = bus_alloc_resource_any(dev,
2145             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2146         if (adapter->res == NULL) {
2147                 device_printf(dev, "Unable to allocate bus resource: "
2148                     "interrupt\n");
2149                 return (ENXIO);
2150         }
2151
2152         /*
2153          * Try allocating a fast interrupt and the associated deferred
2154          * processing contexts.
2155          */
2156 #ifndef IXGBE_LEGACY_TX
2157         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2158 #endif
2159         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2160         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2161             taskqueue_thread_enqueue, &que->tq);
2162         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2163             device_get_nameunit(adapter->dev));
2164
2165         /* Tasklets for Link, SFP and Multispeed Fiber */
2166         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2167         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2168         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2169         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2170 #ifdef IXGBE_FDIR
2171         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2172 #endif
2173         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2174             taskqueue_thread_enqueue, &adapter->tq);
2175         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2176             device_get_nameunit(adapter->dev));
2177
2178         if ((error = bus_setup_intr(dev, adapter->res,
2179             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2180             que, &adapter->tag)) != 0) {
2181                 device_printf(dev, "Failed to register fast interrupt "
2182                     "handler: %d\n", error);
2183                 taskqueue_free(que->tq);
2184                 taskqueue_free(adapter->tq);
2185                 que->tq = NULL;
2186                 adapter->tq = NULL;
2187                 return (error);
2188         }
2189         /* For simplicity in the handlers */
2190         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2191
2192         return (0);
2193 }
2194
2195
2196 /*********************************************************************
2197  *
2198  *  Setup MSIX Interrupt resources and handlers 
2199  *
2200  **********************************************************************/
2201 static int
2202 ixgbe_allocate_msix(struct adapter *adapter)
2203 {
2204         device_t        dev = adapter->dev;
2205         struct          ix_queue *que = adapter->queues;
2206         struct          tx_ring *txr = adapter->tx_rings;
2207         int             error, rid, vector = 0;
2208         int             cpu_id = 0;
2209
2210         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2211                 rid = vector + 1;
2212                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2213                     RF_SHAREABLE | RF_ACTIVE);
2214                 if (que->res == NULL) {
2215                         device_printf(dev,"Unable to allocate"
2216                             " bus resource: que interrupt [%d]\n", vector);
2217                         return (ENXIO);
2218                 }
2219                 /* Set the handler function */
2220                 error = bus_setup_intr(dev, que->res,
2221                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222                     ixgbe_msix_que, que, &que->tag);
2223                 if (error) {
2224                         que->res = NULL;
2225                         device_printf(dev, "Failed to register QUE handler");
2226                         return (error);
2227                 }
2228 #if __FreeBSD_version >= 800504
2229                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2230 #endif
2231                 que->msix = vector;
2232                 adapter->active_queues |= (u64)(1 << que->msix);
2233                 /*
2234                  * Bind the msix vector, and thus the
2235                  * rings to the corresponding cpu.
2236                  *
2237                  * This just happens to match the default RSS round-robin
2238                  * bucket -> queue -> CPU allocation.
2239                  */
2240                 if (adapter->num_queues > 1)
2241                         cpu_id = i;
2242
2243                 if (adapter->num_queues > 1)
2244                         bus_bind_intr(dev, que->res, cpu_id);
2245
2246 #ifndef IXGBE_LEGACY_TX
2247                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2248 #endif
2249                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2250                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2251                     taskqueue_thread_enqueue, &que->tq);
2252                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2253                     device_get_nameunit(adapter->dev));
2254         }
2255
2256         /* and Link */
2257         rid = vector + 1;
2258         adapter->res = bus_alloc_resource_any(dev,
2259             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2260         if (!adapter->res) {
2261                 device_printf(dev,"Unable to allocate"
2262             " bus resource: Link interrupt [%d]\n", rid);
2263                 return (ENXIO);
2264         }
2265         /* Set the link handler function */
2266         error = bus_setup_intr(dev, adapter->res,
2267             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2268             ixgbe_msix_link, adapter, &adapter->tag);
2269         if (error) {
2270                 adapter->res = NULL;
2271                 device_printf(dev, "Failed to register LINK handler");
2272                 return (error);
2273         }
2274 #if __FreeBSD_version >= 800504
2275         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2276 #endif
2277         adapter->vector = vector;
2278         /* Tasklets for Link, SFP and Multispeed Fiber */
2279         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2280         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2281         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2282         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2283 #ifdef IXGBE_FDIR
2284         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2285 #endif
2286         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2287             taskqueue_thread_enqueue, &adapter->tq);
2288         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2289             device_get_nameunit(adapter->dev));
2290
2291         return (0);
2292 }
2293
2294 /*
2295  * Set up either MSI/X or MSI
2296  */
2297 static int
2298 ixgbe_setup_msix(struct adapter *adapter)
2299 {
2300         device_t dev = adapter->dev;
2301         int rid, want, queues, msgs;
2302
2303         /* Override by tuneable */
2304         if (ixgbe_enable_msix == 0)
2305                 goto msi;
2306
2307         /* First try MSI/X */
2308         msgs = pci_msix_count(dev); 
2309         if (msgs == 0)
2310                 goto msi;
2311         rid = PCIR_BAR(MSIX_82598_BAR);
2312         adapter->msix_mem = bus_alloc_resource_any(dev,
2313             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2314         if (adapter->msix_mem == NULL) {
2315                 rid += 4;       /* 82599 maps in higher BAR */
2316                 adapter->msix_mem = bus_alloc_resource_any(dev,
2317                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
2318         }
2319         if (adapter->msix_mem == NULL) {
2320                 /* May not be enabled */
2321                 device_printf(adapter->dev,
2322                     "Unable to map MSIX table \n");
2323                 goto msi;
2324         }
2325
2326         /* Figure out a reasonable auto config value */
2327         queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2328
2329         if (ixgbe_num_queues != 0)
2330                 queues = ixgbe_num_queues;
2331         /* Set max queues to 8 when autoconfiguring */
2332         else if ((ixgbe_num_queues == 0) && (queues > 8))
2333                 queues = 8;
2334
2335         /* reflect correct sysctl value */
2336         ixgbe_num_queues = queues;
2337
2338         /*
2339         ** Want one vector (RX/TX pair) per queue
2340         ** plus an additional for Link.
2341         */
2342         want = queues + 1;
2343         if (msgs >= want)
2344                 msgs = want;
2345         else {
2346                 device_printf(adapter->dev,
2347                     "MSIX Configuration Problem, "
2348                     "%d vectors but %d queues wanted!\n",
2349                     msgs, want);
2350                 goto msi;
2351         }
2352         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2353                 device_printf(adapter->dev,
2354                     "Using MSIX interrupts with %d vectors\n", msgs);
2355                 adapter->num_queues = queues;
2356                 return (msgs);
2357         }
2358         /*
2359         ** If MSIX alloc failed or provided us with
2360         ** less than needed, free and fall through to MSI
2361         */
2362         pci_release_msi(dev);
2363
2364 msi:
2365         if (adapter->msix_mem != NULL) {
2366                 bus_release_resource(dev, SYS_RES_MEMORY,
2367                     rid, adapter->msix_mem);
2368                 adapter->msix_mem = NULL;
2369         }
2370         msgs = 1;
2371         if (pci_alloc_msi(dev, &msgs) == 0) {
2372                 device_printf(adapter->dev,"Using an MSI interrupt\n");
2373                 return (msgs);
2374         }
2375         device_printf(adapter->dev,"Using a Legacy interrupt\n");
2376         return (0);
2377 }
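
/*
** Illustrative note (not part of the upstream driver): a worked example
** of the auto-configuration above, assuming a 16-CPU machine, a device
** advertising 64 MSI-X messages, and the ixgbe_num_queues tuneable left
** at 0:
**
**      queues = min(mp_ncpus, msgs - 1) = min(16, 63) = 16
**      autoconfig cap of 8              -> queues = 8
**      want   = queues + 1              = 9 (8 queue vectors + link)
**      msgs   = 9                          (passed to pci_alloc_msix())
**
** Setting ixgbe_num_queues to a non-zero value bypasses the 8-queue cap.
*/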
2378
2379
2380 static int
2381 ixgbe_allocate_pci_resources(struct adapter *adapter)
2382 {
2383         int             rid;
2384         device_t        dev = adapter->dev;
2385
2386         rid = PCIR_BAR(0);
2387         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2388             &rid, RF_ACTIVE);
2389
2390         if (!(adapter->pci_mem)) {
2391                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2392                 return (ENXIO);
2393         }
2394
2395         adapter->osdep.mem_bus_space_tag =
2396                 rman_get_bustag(adapter->pci_mem);
2397         adapter->osdep.mem_bus_space_handle =
2398                 rman_get_bushandle(adapter->pci_mem);
2399         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2400
2401         /* Legacy defaults */
2402         adapter->num_queues = 1;
2403         adapter->hw.back = &adapter->osdep;
2404
2405         /*
2406         ** Now set up MSI or MSI/X; this should
2407         ** return the number of supported
2408         ** vectors (1 for MSI).
2409         */
2410         adapter->msix = ixgbe_setup_msix(adapter);
2411         return (0);
2412 }
2413
2414 static void
2415 ixgbe_free_pci_resources(struct adapter * adapter)
2416 {
2417         struct          ix_queue *que = adapter->queues;
2418         device_t        dev = adapter->dev;
2419         int             rid, memrid;
2420
2421         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2422                 memrid = PCIR_BAR(MSIX_82598_BAR);
2423         else
2424                 memrid = PCIR_BAR(MSIX_82599_BAR);
2425
2426         /*
2427         ** There is a slight possibility of a failure mode
2428         ** in attach that will result in entering this function
2429         ** before interrupt resources have been initialized, and
2430         ** in that case we do not want to execute the loops below.
2431         ** We can detect this reliably by the state of the adapter
2432         ** res pointer.
2433         */
2434         if (adapter->res == NULL)
2435                 goto mem;
2436
2437         /*
2438         **  Release all msix queue resources:
2439         */
2440         for (int i = 0; i < adapter->num_queues; i++, que++) {
2441                 rid = que->msix + 1;
2442                 if (que->tag != NULL) {
2443                         bus_teardown_intr(dev, que->res, que->tag);
2444                         que->tag = NULL;
2445                 }
2446                 if (que->res != NULL)
2447                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2448         }
2449
2450
2451         /* Clean the Legacy or Link interrupt last */
2452         if (adapter->vector) /* we are doing MSIX */
2453                 rid = adapter->vector + 1;
2454         else
2455                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2456
2457         if (adapter->tag != NULL) {
2458                 bus_teardown_intr(dev, adapter->res, adapter->tag);
2459                 adapter->tag = NULL;
2460         }
2461         if (adapter->res != NULL)
2462                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2463
2464 mem:
2465         if (adapter->msix)
2466                 pci_release_msi(dev);
2467
2468         if (adapter->msix_mem != NULL)
2469                 bus_release_resource(dev, SYS_RES_MEMORY,
2470                     memrid, adapter->msix_mem);
2471
2472         if (adapter->pci_mem != NULL)
2473                 bus_release_resource(dev, SYS_RES_MEMORY,
2474                     PCIR_BAR(0), adapter->pci_mem);
2475
2476         return;
2477 }
2478
2479 /*********************************************************************
2480  *
2481  *  Setup networking device structure and register an interface.
2482  *
2483  **********************************************************************/
2484 static int
2485 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2486 {
2487         struct ifnet   *ifp;
2488
2489         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2490
2491         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2492         if (ifp == NULL) {
2493                 device_printf(dev, "can not allocate ifnet structure\n");
2494                 return (-1);
2495         }
2496         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2497         if_initbaudrate(ifp, IF_Gbps(10));
2498         ifp->if_init = ixgbe_init;
2499         ifp->if_softc = adapter;
2500         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2501         ifp->if_ioctl = ixgbe_ioctl;
2502 #ifndef IXGBE_LEGACY_TX
2503         ifp->if_transmit = ixgbe_mq_start;
2504         ifp->if_qflush = ixgbe_qflush;
2505 #else
2506         ifp->if_start = ixgbe_start;
2507         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2508         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2509         IFQ_SET_READY(&ifp->if_snd);
2510 #endif
2511
2512         ether_ifattach(ifp, adapter->hw.mac.addr);
2513
2514         adapter->max_frame_size =
2515             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2516
2517         /*
2518          * Tell the upper layer(s) we support long frames.
2519          */
2520         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2521
2522         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2523         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2524         ifp->if_capabilities |= IFCAP_LRO;
2525         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2526                              |  IFCAP_VLAN_HWTSO
2527                              |  IFCAP_VLAN_MTU
2528                              |  IFCAP_HWSTATS;
2529         ifp->if_capenable = ifp->if_capabilities;
2530
2531         /*
2532         ** Don't turn this on by default: if vlans are
2533         ** created on another pseudo device (e.g. lagg)
2534         ** then vlan events are not passed thru, breaking
2535         ** operation, but with HW FILTER off it works. If
2536         ** using vlans directly on the ixgbe driver you can
2537         ** enable this and get full hardware tag filtering.
2538         */
2539         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2540
2541         /*
2542          * Specify the media types supported by this adapter and register
2543          * callbacks to update media and link information
2544          */
2545         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2546                     ixgbe_media_status);
2547
2548         ixgbe_add_media_types(adapter);
2549
2550         /* Autoselect media by default */
2551         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2552
2553         return (0);
2554 }
2555
2556 static void
2557 ixgbe_add_media_types(struct adapter *adapter)
2558 {
2559         struct ixgbe_hw *hw = &adapter->hw;
2560         device_t dev = adapter->dev;
2561         int layer;
2562
2563         layer = ixgbe_get_supported_physical_layer(hw);
2564
2565         /* Media types with matching FreeBSD media defines */
2566         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2567                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2568         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2569                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2570         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2571                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2572         
2573         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2574             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2575                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2576
2577         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2578                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2579         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2580                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2581         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2582                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2583         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2584                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2585
2586         /*
2587         ** Other (no matching FreeBSD media type):
2588         ** To work around this, we'll assign these completely
2589         ** inappropriate media types.
2590         */
2591         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2592                 device_printf(dev, "Media supported: 10GbaseKR\n");
2593                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2594                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2595         }
2596         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2597                 device_printf(dev, "Media supported: 10GbaseKX4\n");
2598                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2599                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2600         }
2601         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2602                 device_printf(dev, "Media supported: 1000baseKX\n");
2603                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2604                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2605         }
2606         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2607                 /* Someday, someone will care about you... */
2608                 device_printf(dev, "Media supported: 1000baseBX\n");
2609         }
2610         
2611         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2612                 ifmedia_add(&adapter->media,
2613                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2614                 ifmedia_add(&adapter->media,
2615                     IFM_ETHER | IFM_1000_T, 0, NULL);
2616         }
2617
2618         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2619 }
2620
2621 static void
2622 ixgbe_config_link(struct adapter *adapter)
2623 {
2624         struct ixgbe_hw *hw = &adapter->hw;
2625         u32     autoneg, err = 0;
2626         bool    sfp, negotiate;
2627
2628         sfp = ixgbe_is_sfp(hw);
2629
2630         if (sfp) { 
2631                 if (hw->phy.multispeed_fiber) {
2632                         hw->mac.ops.setup_sfp(hw);
2633                         ixgbe_enable_tx_laser(hw);
2634                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2635                 } else
2636                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2637         } else {
2638                 if (hw->mac.ops.check_link)
2639                         err = ixgbe_check_link(hw, &adapter->link_speed,
2640                             &adapter->link_up, FALSE);
2641                 if (err)
2642                         goto out;
2643                 autoneg = hw->phy.autoneg_advertised;
2644                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2645                         err  = hw->mac.ops.get_link_capabilities(hw,
2646                             &autoneg, &negotiate);
2647                 if (err)
2648                         goto out;
2649                 if (hw->mac.ops.setup_link)
2650                         err = hw->mac.ops.setup_link(hw,
2651                             autoneg, adapter->link_up);
2652         }
2653 out:
2654         return;
2655 }
2656
2657
2658 /*********************************************************************
2659  *
2660  *  Enable transmit units.
2661  *
2662  **********************************************************************/
2663 static void
2664 ixgbe_initialize_transmit_units(struct adapter *adapter)
2665 {
2666         struct tx_ring  *txr = adapter->tx_rings;
2667         struct ixgbe_hw *hw = &adapter->hw;
2668
2669         /* Setup the Base and Length of the Tx Descriptor Ring */
2670
2671         for (int i = 0; i < adapter->num_queues; i++, txr++) {
2672                 u64     tdba = txr->txdma.dma_paddr;
2673                 u32     txctrl = 0;
2674
2675                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2676                        (tdba & 0x00000000ffffffffULL));
2677                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2678                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2679                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2680
2681                 /* Setup the HW Tx Head and Tail descriptor pointers */
2682                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2683                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2684
2685                 /* Cache the tail address */
2686                 txr->tail = IXGBE_TDT(txr->me);
2687
2688                 /* Set the processing limit */
2689                 txr->process_limit = ixgbe_tx_process_limit;
2690
2691                 /* Disable Head Writeback */
2692                 switch (hw->mac.type) {
2693                 case ixgbe_mac_82598EB:
2694                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2695                         break;
2696                 case ixgbe_mac_82599EB:
2697                 case ixgbe_mac_X540:
2698                 default:
2699                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2700                         break;
2701                 }
2702                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2703                 switch (hw->mac.type) {
2704                 case ixgbe_mac_82598EB:
2705                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2706                         break;
2707                 case ixgbe_mac_82599EB:
2708                 case ixgbe_mac_X540:
2709                 default:
2710                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2711                         break;
2712                 }
2713
2714         }
2715
2716         if (hw->mac.type != ixgbe_mac_82598EB) {
2717                 u32 dmatxctl, rttdcs;
2718                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2719                 dmatxctl |= IXGBE_DMATXCTL_TE;
2720                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2721                 /* Disable arbiter to set MTQC */
2722                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2723                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2724                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2725                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2726                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2727                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2728         }
2729
2730         return;
2731 }
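
/*
** Illustrative note (not part of the upstream driver): the base-address
** programming above splits each ring's 64-bit DMA address across two
** 32-bit registers.  For a hypothetical ring at physical address
** 0x123456780:
**
**      TDBAL = tdba & 0x00000000ffffffffULL = 0x23456780
**      TDBAH = tdba >> 32                   = 0x00000001
**      TDLEN = num_tx_desc * sizeof(union ixgbe_adv_tx_desc)
*/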
2732
2733 static void
2734 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2735 {
2736         struct ixgbe_hw *hw = &adapter->hw;
2737         uint32_t reta;
2738         int i, j, queue_id, table_size;
2739         int index_mult;
2740         uint32_t rss_key[10];
2741         uint32_t mrqc;
2742
2743         /* Setup RSS */
2744         reta = 0;
2745
2746         /* set up random bits */
2747         arc4rand(&rss_key, sizeof(rss_key), 0);
2748
2749         /* Set multiplier for RETA setup and table size based on MAC */
2750         index_mult = 0x1;
2751         table_size = 128;
2752         switch (adapter->hw.mac.type) {
2753         case ixgbe_mac_82598EB:
2754                 index_mult = 0x11;
2755                 break;
2756         case ixgbe_mac_X550:
2757         case ixgbe_mac_X550EM_x:
2758                 table_size = 512;
2759                 break;
2760         default:
2761                 break;
2762         }
2763
2764         /* Set up the redirection table */
2765         for (i = 0, j = 0; i < table_size; i++, j++) {
2766                 if (j == adapter->num_queues) j = 0;
2767                 queue_id = (j * index_mult);
2768                 /*
2769                  * The low 8 bits are for hash value (n+0);
2770                  * The next 8 bits are for hash value (n+1), etc.
2771                  */
2772                 reta = reta >> 8;
2773                 reta = reta | ( ((uint32_t) queue_id) << 24);
2774                 if ((i & 3) == 3) {
2775                         if (i < 128)
2776                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2777                         else
2778                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2779                         reta = 0;
2780                 }
2781         }
2782
2783         /* Now fill our hash function seeds */
2784         for (int i = 0; i < 10; i++)
2785                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2786
2787         /* Perform hash on these packet types */
2788         /*
2789          * Disable UDP - IP fragments aren't currently being handled
2790          * and so we end up with a mix of 2-tuple and 4-tuple
2791          * traffic.
2792          */
2793         mrqc = IXGBE_MRQC_RSSEN
2794              | IXGBE_MRQC_RSS_FIELD_IPV4
2795              | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2796 #if 0
2797              | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2798 #endif
2799              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2800              | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2801              | IXGBE_MRQC_RSS_FIELD_IPV6
2802              | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2803 #if 0
2804              | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2805              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2806 #endif
2807         ;
2808         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2809 }
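
/*
** Illustrative note (not part of the upstream driver): with four queues
** and index_mult = 1 (any non-82598 MAC), the first pass of the loop
** above packs redirection entries 0..3 as
**
**      i=0: reta = 0x00000000      (bucket 0 -> queue 0)
**      i=1: reta = 0x01000000
**      i=2: reta = 0x02010000
**      i=3: reta = 0x03020100      -> written to IXGBE_RETA(0)
**
** so each byte of the register, lowest byte first, names the queue for
** one hash bucket, exactly as the in-loop comment describes.
*/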
2810
2811
2812 /*********************************************************************
2813  *
2814  *  Setup receive registers and features.
2815  *
2816  **********************************************************************/
2817 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2818
2819 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2820         
2821 static void
2822 ixgbe_initialize_receive_units(struct adapter *adapter)
2823 {
2824         struct  rx_ring *rxr = adapter->rx_rings;
2825         struct ixgbe_hw *hw = &adapter->hw;
2826         struct ifnet   *ifp = adapter->ifp;
2827         u32             bufsz, fctrl, srrctl, rxcsum;
2828         u32             hlreg;
2829
2830
2831         /*
2832          * Make sure receives are disabled while
2833          * setting up the descriptor ring
2834          */
2835         ixgbe_disable_rx(hw);
2836
2837         /* Enable broadcasts */
2838         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2839         fctrl |= IXGBE_FCTRL_BAM;
2840         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2841                 fctrl |= IXGBE_FCTRL_DPF;
2842                 fctrl |= IXGBE_FCTRL_PMCF;
2843         }
2844         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2845
2846         /* Set for Jumbo Frames? */
2847         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2848         if (ifp->if_mtu > ETHERMTU)
2849                 hlreg |= IXGBE_HLREG0_JUMBOEN;
2850         else
2851                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2852 #ifdef DEV_NETMAP
2853         /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2854         if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2855                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2856         else
2857                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2858 #endif /* DEV_NETMAP */
2859         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2860
2861         bufsz = (adapter->rx_mbuf_sz +
2862             BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2863
2864         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2865                 u64 rdba = rxr->rxdma.dma_paddr;
2866
2867                 /* Setup the Base and Length of the Rx Descriptor Ring */
2868                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2869                                (rdba & 0x00000000ffffffffULL));
2870                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2871                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2872                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2873
2874                 /* Set up the SRRCTL register */
2875                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2876                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2877                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2878                 srrctl |= bufsz;
2879                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2880
2881                 /*
2882                  * Set DROP_EN iff we have no flow control and >1 queue.
2883                  * Note that srrctl was cleared shortly before during reset,
2884                  * so we do not need to clear the bit, but do it just in case
2885                  * this code is moved elsewhere.
2886                  */
2887                 if (adapter->num_queues > 1 &&
2888                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2889                         srrctl |= IXGBE_SRRCTL_DROP_EN;
2890                 } else {
2891                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2892                 }
2893
2894                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2895
2896                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2897                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2898                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2899
2900                 /* Set the processing limit */
2901                 rxr->process_limit = ixgbe_rx_process_limit;
2902
2903                 /* Set the driver rx tail address */
2904                 rxr->tail =  IXGBE_RDT(rxr->me);
2905         }
2906
2907         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2908                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2909                               IXGBE_PSRTYPE_UDPHDR |
2910                               IXGBE_PSRTYPE_IPV4HDR |
2911                               IXGBE_PSRTYPE_IPV6HDR;
2912                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2913         }
2914
2915         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2916
2917         ixgbe_initialise_rss_mapping(adapter);
2918
2919         if (adapter->num_queues > 1) {
2920                 /* RSS and RX IPP Checksum are mutually exclusive */
2921                 rxcsum |= IXGBE_RXCSUM_PCSD;
2922         }
2923
2924         if (ifp->if_capenable & IFCAP_RXCSUM)
2925                 rxcsum |= IXGBE_RXCSUM_PCSD;
2926
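        /*
         * If packet checksum (PCSD) ends up disabled, fall back to the
         * legacy IP payload checksum (IPPCSE) in the descriptor.
         */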
2927         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2928                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2929
2930         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2931
2932         return;
2933 }
2934
2935
2936 /*
2937 ** This routine is run via a vlan config EVENT;
2938 ** it enables us to use the HW Filter table since
2939 ** we can get the vlan id. This just creates the
2940 ** entry in the soft version of the VFTA; init will
2941 ** repopulate the real table.
2942 */
2943 static void
2944 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2945 {
2946         struct adapter  *adapter = ifp->if_softc;
2947         u16             index, bit;
2948
2949         if (ifp->if_softc !=  arg)   /* Not our event */
2950                 return;
2951
2952         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2953                 return;
2954
2955         IXGBE_CORE_LOCK(adapter);
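        /*
         * The VFTA is a 4096-bit table held as 128 32-bit words:
         * bits 11:5 of the tag select the word, bits 4:0 the bit.
         */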
2956         index = (vtag >> 5) & 0x7F;
2957         bit = vtag & 0x1F;
2958         adapter->shadow_vfta[index] |= (1 << bit);
2959         ++adapter->num_vlans;
2960         ixgbe_setup_vlan_hw_support(adapter);
2961         IXGBE_CORE_UNLOCK(adapter);
2962 }
2963
2964 /*
2965 ** This routine is run via a vlan
2966 ** unconfig EVENT; it removes our entry
2967 ** from the soft vfta.
2968 */
2969 static void
2970 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2971 {
2972         struct adapter  *adapter = ifp->if_softc;
2973         u16             index, bit;
2974
2975         if (ifp->if_softc !=  arg)
2976                 return;
2977
2978         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2979                 return;
2980
2981         IXGBE_CORE_LOCK(adapter);
2982         index = (vtag >> 5) & 0x7F;
2983         bit = vtag & 0x1F;
2984         adapter->shadow_vfta[index] &= ~(1 << bit);
2985         --adapter->num_vlans;
2986         /* Re-init to load the changes */
2987         ixgbe_setup_vlan_hw_support(adapter);
2988         IXGBE_CORE_UNLOCK(adapter);
2989 }
2990
2991 static void
2992 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2993 {
2994         struct ifnet    *ifp = adapter->ifp;
2995         struct ixgbe_hw *hw = &adapter->hw;
2996         struct rx_ring  *rxr;
2997         u32             ctrl;
2998
2999
3000         /*
3001         ** We get here through init_locked, meaning
3002         ** a soft reset; this has already cleared
3003         ** the VFTA and other state, so if no vlans
3004         ** have been registered, do nothing.
3005         */
3006         if (adapter->num_vlans == 0)
3007                 return;
3008
3009         /* Setup the queues for vlans */
3010         for (int i = 0; i < adapter->num_queues; i++) {
3011                 rxr = &adapter->rx_rings[i];
3012                 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3013                 if (hw->mac.type != ixgbe_mac_82598EB) {
3014                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3015                         ctrl |= IXGBE_RXDCTL_VME;
3016                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3017                 }
3018                 rxr->vtag_strip = TRUE;
3019         }
3020
3021         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3022                 return;
3023         /*
3024         ** A soft reset zeroes out the VFTA, so
3025         ** we need to repopulate it now.
3026         */
3027         for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3028                 if (adapter->shadow_vfta[i] != 0)
3029                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3030                             adapter->shadow_vfta[i]);
3031
3032         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3033         /* Enable the Filter Table if enabled */
3034         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3035                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3036                 ctrl |= IXGBE_VLNCTRL_VFE;
3037         }
3038         if (hw->mac.type == ixgbe_mac_82598EB)
3039                 ctrl |= IXGBE_VLNCTRL_VME;
3040         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3041 }
3042
3043 static void
3044 ixgbe_enable_intr(struct adapter *adapter)
3045 {
3046         struct ixgbe_hw *hw = &adapter->hw;
3047         struct ix_queue *que = adapter->queues;
3048         u32             mask, fwsm;
3049
3050         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3051         /* Enable Fan Failure detection */
3052         if (hw->device_id == IXGBE_DEV_ID_82598AT)
3053                 mask |= IXGBE_EIMS_GPI_SDP1;
3054
3055         switch (adapter->hw.mac.type) {
3056                 case ixgbe_mac_82599EB:
3057                         mask |= IXGBE_EIMS_ECC;
3058                         /* Temperature sensor on some adapters */
3059                         mask |= IXGBE_EIMS_GPI_SDP0;
3060                         /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3061                         mask |= IXGBE_EIMS_GPI_SDP1;
3062                         mask |= IXGBE_EIMS_GPI_SDP2;
3063 #ifdef IXGBE_FDIR
3064                         mask |= IXGBE_EIMS_FLOW_DIR;
3065 #endif
3066                         break;
3067                 case ixgbe_mac_X540:
3068                         /* Detect if Thermal Sensor is enabled */
3069                         fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3070                         if (fwsm & IXGBE_FWSM_TS_ENABLED)
3071                                 mask |= IXGBE_EIMS_TS;
3072                         mask |= IXGBE_EIMS_ECC;
3073 #ifdef IXGBE_FDIR
3074                         mask |= IXGBE_EIMS_FLOW_DIR;
3075 #endif
3076                         break;
3077                 case ixgbe_mac_X550:
3078                 case ixgbe_mac_X550EM_x:
3079                         /* MAC thermal sensor is automatically enabled */
3080                         mask |= IXGBE_EIMS_TS;
3081                         /* Some devices use SDP0 for important information */
3082                         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3083                             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3084                                 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3085                         mask |= IXGBE_EIMS_ECC;
3086 #ifdef IXGBE_FDIR
3087                         mask |= IXGBE_EIMS_FLOW_DIR;
3088 #endif
3089                 /* falls through */
3090                 default:
3091                         break;
3092         }
3093
3094         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3095
3096         /* With MSI-X we use auto clear */
3097         if (adapter->msix_mem) {
3098                 mask = IXGBE_EIMS_ENABLE_MASK;
3099                 /* Don't autoclear Link */
3100                 mask &= ~IXGBE_EIMS_OTHER;
3101                 mask &= ~IXGBE_EIMS_LSC;
3102                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3103         }
3104
3105         /*
3106         ** Now enable all queues; this is done separately to
3107         ** allow handling of the extended (beyond 32) MSIX
3108         ** vectors that can be used by the 82599.
3109         */
3110         for (int i = 0; i < adapter->num_queues; i++, que++)
3111                 ixgbe_enable_queue(adapter, que->msix);
3112
3113         IXGBE_WRITE_FLUSH(hw);
3114
3115         return;
3116 }
3117
3118 static void
3119 ixgbe_disable_intr(struct adapter *adapter)
3120 {
3121         if (adapter->msix_mem)
3122                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3123         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3124                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3125         } else {
3126                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3127                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3128                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3129         }
3130         IXGBE_WRITE_FLUSH(&adapter->hw);
3131         return;
3132 }
3133
3134 /*
3135 ** Get the width and transaction speed of
3136 ** the slot this adapter is plugged into.
3137 */
3138 static void
3139 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3140 {
3141         device_t                dev = ((struct ixgbe_osdep *)hw->back)->dev;
3142         struct ixgbe_mac_info   *mac = &hw->mac;
3143         u16                     link;
3144         u32                     offset;
3145
3146         /* For most devices simply call the shared code routine */
3147         if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3148                 ixgbe_get_bus_info(hw);
3149                 /* These devices don't use PCI-E */
3150                 switch (hw->mac.type) {
3151                 case ixgbe_mac_X550EM_x:
3152                         return;
3153                 default:
3154                         goto display;
3155                 }
3156         }
3157
3158         /*
3159         ** For the Quad port adapter we need to parse back
3160         ** up the PCI tree to find the speed of the expansion
3161         ** slot into which this adapter is plugged. A bit more work.
3162         */
3163         dev = device_get_parent(device_get_parent(dev));
3164 #ifdef IXGBE_DEBUG
3165         device_printf(dev, "parent pcib = %x,%x,%x\n",
3166             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3167 #endif
3168         dev = device_get_parent(device_get_parent(dev));
3169 #ifdef IXGBE_DEBUG
3170         device_printf(dev, "slot pcib = %x,%x,%x\n",
3171             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3172 #endif
3173         /* Now get the PCI Express Capabilities offset */
3174         pci_find_cap(dev, PCIY_EXPRESS, &offset);
3175         /* ...and read the Link Status Register */
3176         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3177         switch (link & IXGBE_PCI_LINK_WIDTH) {
3178         case IXGBE_PCI_LINK_WIDTH_1:
3179                 hw->bus.width = ixgbe_bus_width_pcie_x1;
3180                 break;
3181         case IXGBE_PCI_LINK_WIDTH_2:
3182                 hw->bus.width = ixgbe_bus_width_pcie_x2;
3183                 break;
3184         case IXGBE_PCI_LINK_WIDTH_4:
3185                 hw->bus.width = ixgbe_bus_width_pcie_x4;
3186                 break;
3187         case IXGBE_PCI_LINK_WIDTH_8:
3188                 hw->bus.width = ixgbe_bus_width_pcie_x8;
3189                 break;
3190         default:
3191                 hw->bus.width = ixgbe_bus_width_unknown;
3192                 break;
3193         }
3194
3195         switch (link & IXGBE_PCI_LINK_SPEED) {
3196         case IXGBE_PCI_LINK_SPEED_2500:
3197                 hw->bus.speed = ixgbe_bus_speed_2500;
3198                 break;
3199         case IXGBE_PCI_LINK_SPEED_5000:
3200                 hw->bus.speed = ixgbe_bus_speed_5000;
3201                 break;
3202         case IXGBE_PCI_LINK_SPEED_8000:
3203                 hw->bus.speed = ixgbe_bus_speed_8000;
3204                 break;
3205         default:
3206                 hw->bus.speed = ixgbe_bus_speed_unknown;
3207                 break;
3208         }
3209
3210         mac->ops.set_lan_id(hw);
3211
3212 display:
3213         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3214             ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3215             (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3216             (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3217             (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3218             (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3219             (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3220             ("Unknown"));
3221
3222         if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3223             ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3224             (hw->bus.speed == ixgbe_bus_speed_2500))) {
3225                 device_printf(dev, "PCI-Express bandwidth available"
3226                     " for this card\n     is not sufficient for"
3227                     " optimal performance.\n");
3228                 device_printf(dev, "For optimal performance a x8 "
3229                     "PCIE, or x4 PCIE Gen2 slot is required.\n");
3230         }
3231         if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3232             ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3233             (hw->bus.speed < ixgbe_bus_speed_8000))) {
3234                 device_printf(dev, "PCI-Express bandwidth available"
3235                     " for this card\n     is not sufficient for"
3236                     " optimal performance.\n");
3237                 device_printf(dev, "For optimal performance a x8 "
3238                     "PCIE Gen3 slot is required.\n");
3239         }
3240
3241         return;
3242 }
3243
3244
3245 /*
3246 ** Setup the correct IVAR register for a particular MSIX interrupt
3247 **   (yes this is all very magic and confusing :)
3248 **  - entry is the register array entry
3249 **  - vector is the MSIX vector for this queue
3250 **  - type is RX/TX/MISC
3251 */
3252 static void
3253 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3254 {
3255         struct ixgbe_hw *hw = &adapter->hw;
3256         u32 ivar, index;
3257
3258         vector |= IXGBE_IVAR_ALLOC_VAL;
3259
3260         switch (hw->mac.type) {
3261
3262         case ixgbe_mac_82598EB:
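                /*
                 * 82598: each IVAR register holds four 8-bit entries;
                 * RX causes use entries 0-63 and TX causes the next 64,
                 * hence the (type * 64) offset and byte select below.
                 */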
3263                 if (type == -1)
3264                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3265                 else
3266                         entry += (type * 64);
3267                 index = (entry >> 2) & 0x1F;
3268                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3269                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3270                 ivar |= (vector << (8 * (entry & 0x3)));
3271                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3272                 break;
3273
3274         case ixgbe_mac_82599EB:
3275         case ixgbe_mac_X540:
3276         case ixgbe_mac_X550:
3277         case ixgbe_mac_X550EM_x:
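                /*
                 * 82599 and newer: queue IVARs pair two queues per
                 * register (entry >> 1); within each 16-bit half the
                 * low byte holds the RX cause and the high byte the TX
                 * cause, which is what (16 * (entry & 1)) + (8 * type)
                 * selects.
                 */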
3278                 if (type == -1) { /* MISC IVAR */
3279                         index = (entry & 1) * 8;
3280                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3281                         ivar &= ~(0xFF << index);
3282                         ivar |= (vector << index);
3283                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3284                 } else {        /* RX/TX IVARS */
3285                         index = (16 * (entry & 1)) + (8 * type);
3286                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3287                         ivar &= ~(0xFF << index);
3288                         ivar |= (vector << index);
3289                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3290                 }
                break;
3291
3292         default:
3293                 break;
3294         }
3295 }
3296
3297 static void
3298 ixgbe_configure_ivars(struct adapter *adapter)
3299 {
3300         struct  ix_queue *que = adapter->queues;
3301         u32 newitr;
3302
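        /*
         * ixgbe_max_interrupt_rate is in interrupts/second; 4000000/rate,
         * masked to the EITR interval field (bits 3-11), sets the matching
         * minimum gap between interrupts for each queue.
         */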
3303         if (ixgbe_max_interrupt_rate > 0)
3304                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3305         else {
3306                 /*
3307                 ** Disable DMA coalescing if interrupt moderation is
3308                 ** disabled.
3309                 */
3310                 adapter->dmac = 0;
3311                 newitr = 0;
3312         }
3313
3314         for (int i = 0; i < adapter->num_queues; i++, que++) {
3315                 /* First the RX queue entry */
3316                 ixgbe_set_ivar(adapter, i, que->msix, 0);
3317                 /* ... and the TX */
3318                 ixgbe_set_ivar(adapter, i, que->msix, 1);
3319                 /* Set an Initial EITR value */
3320                 IXGBE_WRITE_REG(&adapter->hw,
3321                     IXGBE_EITR(que->msix), newitr);
3322         }
3323
3324         /* For the Link interrupt */
3325         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3326 }
3327
3328 /*
3329 ** ixgbe_sfp_probe - called from the local timer to
3330 ** determine whether a port has had optics inserted.
3331 */
3332 static bool ixgbe_sfp_probe(struct adapter *adapter)
3333 {
3334         struct ixgbe_hw *hw = &adapter->hw;
3335         device_t        dev = adapter->dev;
3336         bool            result = FALSE;
3337
3338         if ((hw->phy.type == ixgbe_phy_nl) &&
3339             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3340                 s32 ret = hw->phy.ops.identify_sfp(hw);
3341                 if (ret)
3342                         goto out;
3343                 ret = hw->phy.ops.reset(hw);
3344                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3345                         device_printf(dev,"Unsupported SFP+ module detected!");
3346                         printf(" Reload driver with supported module.\n");
3347                         adapter->sfp_probe = FALSE;
3348                         goto out;
3349                 } else
3350                         device_printf(dev,"SFP+ module detected!\n");
3351                 /* We now have supported optics */
3352                 adapter->sfp_probe = FALSE;
3353                 /* Set the optics type so system reports correctly */
3354                 ixgbe_setup_optics(adapter);
3355                 result = TRUE;
3356         }
3357 out:
3358         return (result);
3359 }
3360
3361 /*
3362 ** Tasklet handler for MSIX Link interrupts
3363 **  - done outside the interrupt context since it might sleep
3364 */
3365 static void
3366 ixgbe_handle_link(void *context, int pending)
3367 {
3368         struct adapter  *adapter = context;
3369
3370         ixgbe_check_link(&adapter->hw,
3371             &adapter->link_speed, &adapter->link_up, 0);
3372         ixgbe_update_link_status(adapter);
3373 }
3374
3375 /*
3376 ** Tasklet for handling SFP module interrupts
3377 */
3378 static void
3379 ixgbe_handle_mod(void *context, int pending)
3380 {
3381         struct adapter  *adapter = context;
3382         struct ixgbe_hw *hw = &adapter->hw;
3383         device_t        dev = adapter->dev;
3384         u32 err;
3385
3386         err = hw->phy.ops.identify_sfp(hw);
3387         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3388                 device_printf(dev,
3389                     "Unsupported SFP+ module type was detected.\n");
3390                 return;
3391         }
3392         err = hw->mac.ops.setup_sfp(hw);
3393         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3394                 device_printf(dev,
3395                     "Setup failure - unsupported SFP+ module type.\n");
3396                 return;
3397         }
3398         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3399         return;
3400 }
3401
3402
3403 /*
3404 ** Tasklet for handling MSF (multispeed fiber) interrupts
3405 */
3406 static void
3407 ixgbe_handle_msf(void *context, int pending)
3408 {
3409         struct adapter  *adapter = context;
3410         struct ixgbe_hw *hw = &adapter->hw;
3411         u32 autoneg;
3412         bool negotiate;
3413         int err;
3414
3415         err = hw->phy.ops.identify_sfp(hw);
3416         if (!err) {
3417                 ixgbe_setup_optics(adapter);
3418                 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3419         }
3420
3421         autoneg = hw->phy.autoneg_advertised;
3422         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3423                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3424         if (hw->mac.ops.setup_link)
3425                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3426
3427         ifmedia_removeall(&adapter->media);
3428         ixgbe_add_media_types(adapter);
3429         return;
3430 }
3431
3432 /*
3433 ** Tasklet for handling interrupts from an external PHY
3434 */
3435 static void
3436 ixgbe_handle_phy(void *context, int pending)
3437 {
3438         struct adapter  *adapter = context;
3439         struct ixgbe_hw *hw = &adapter->hw;
3440         int error;
3441
3442         error = hw->phy.ops.handle_lasi(hw);
3443         if (error == IXGBE_ERR_OVERTEMP)
3444                 device_printf(adapter->dev,
3445                     "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3446                     "PHY will downshift to lower power state!\n");
3447         else if (error)
3448                 device_printf(adapter->dev,
3449                     "Error handling LASI interrupt: %d\n",
3450                     error);
3451         return;
3452 }
3453
3454 #ifdef IXGBE_FDIR
3455 /*
3456 ** Tasklet for reinitializing the Flow Director filter table
3457 */
3458 static void
3459 ixgbe_reinit_fdir(void *context, int pending)
3460 {
3461         struct adapter  *adapter = context;
3462         struct ifnet   *ifp = adapter->ifp;
3463
3464         if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3465                 return;
3466         ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3467         adapter->fdir_reinit = 0;
3468         /* re-enable flow director interrupts */
3469         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3470         /* Restart the interface */
3471         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3472         return;
3473 }
3474 #endif
3475
3476 /*********************************************************************
3477  *
3478  *  Configure DMA Coalescing
3479  *
3480  **********************************************************************/
3481 static void
3482 ixgbe_config_dmac(struct adapter *adapter)
3483 {
3484         struct ixgbe_hw *hw = &adapter->hw;
3485         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3486
3487         if (hw->mac.type < ixgbe_mac_X550 ||
3488             !hw->mac.ops.dmac_config)
3489                 return;
3490
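        /*
         * The XORs act as "!=": only reprogram DMA coalescing when the
         * requested watchdog timer or the link speed has changed.
         */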
3491         if (dcfg->watchdog_timer ^ adapter->dmac ||
3492             dcfg->link_speed ^ adapter->link_speed) {
3493                 dcfg->watchdog_timer = adapter->dmac;
3494                 dcfg->fcoe_en = false;
3495                 dcfg->link_speed = adapter->link_speed;
3496                 dcfg->num_tcs = 1;
3497                 
3498                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3499                     dcfg->watchdog_timer, dcfg->link_speed);
3500
3501                 hw->mac.ops.dmac_config(hw);
3502         }
3503 }
3504
3505 /*
3506  * Checks whether the adapter supports Energy Efficient Ethernet
3507  * or not, based on device ID.
3508  */
3509 static void
3510 ixgbe_check_eee_support(struct adapter *adapter)
3511 {
3512         struct ixgbe_hw *hw = &adapter->hw;
3513
3514         adapter->eee_support = adapter->eee_enabled =
3515             (hw->device_id == IXGBE_DEV_ID_X550T ||
3516                 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3517 }
3518
3519 /*
3520  * Checks whether the adapter's ports are capable of
3521  * Wake On LAN by reading the adapter's NVM.
3522  *
3523  * Sets each port's hw->wol_enabled value depending
3524  * on the value read here.
3525  */
3526 static void
3527 ixgbe_check_wol_support(struct adapter *adapter)
3528 {
3529         struct ixgbe_hw *hw = &adapter->hw;
3530         u16 dev_caps = 0;
3531
3532         /* Find out WoL support for port */
3533         adapter->wol_support = hw->wol_enabled = 0;
3534         ixgbe_get_device_caps(hw, &dev_caps);
3535         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3536             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3537                 hw->bus.func == 0))
3538             adapter->wol_support = hw->wol_enabled = 1;
3539
3540         /* Save initial wake up filter configuration */
3541         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3542
3543         return;
3544 }
3545
3546 /*
3547  * Prepare the adapter/port for LPLU and/or WoL
3548  */
3549 static int
3550 ixgbe_setup_low_power_mode(struct adapter *adapter)
3551 {
3552         struct ixgbe_hw *hw = &adapter->hw;
3553         device_t dev = adapter->dev;
3554         s32 error = 0;
3555
3556         mtx_assert(&adapter->core_mtx, MA_OWNED);
3557
3558         /* Limit power management flow to X550EM baseT */
3559         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3560             && hw->phy.ops.enter_lplu) {
3561                 /* Turn off support for APM wakeup. (Using ACPI instead) */
3562                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3563                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3564
3565                 /*
3566                  * Clear Wake Up Status register to prevent any previous wakeup
3567                  * events from waking us up immediately after we suspend.
3568                  */
3569                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3570
3571                 /*
3572                  * Program the Wakeup Filter Control register with user filter
3573                  * settings
3574                  */
3575                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3576
3577                 /* Enable wakeups and power management in Wakeup Control */
3578                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3579                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3580
3581                 /* X550EM baseT adapters need a special LPLU flow */
3582                 hw->phy.reset_disable = true;
3583                 ixgbe_stop(adapter);
3584                 error = hw->phy.ops.enter_lplu(hw);
3585                 if (error)
3586                         device_printf(dev,
3587                             "Error entering LPLU: %d\n", error);
3588                 hw->phy.reset_disable = false;
3589         } else {
3590                 /* Just stop for other adapters */
3591                 ixgbe_stop(adapter);
3592         }
3593
3594         return error;
3595 }
3596
3597 /**********************************************************************
3598  *
3599  *  Update the board statistics counters.
3600  *
3601  **********************************************************************/
3602 static void
3603 ixgbe_update_stats_counters(struct adapter *adapter)
3604 {
3605         struct ixgbe_hw *hw = &adapter->hw;
3606         u32 missed_rx = 0, bprc, lxon, lxoff, total;
3607         u64 total_missed_rx = 0;
3608
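        /*
         * Most of these hardware counters are clear-on-read, so each
         * pass accumulates the new deltas into the softc copies.
         */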
3609         adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3610         adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3611         adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3612         adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3613
3614         for (int i = 0; i < 16; i++) {
3615                 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3616                 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3617                 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3618         }
3619         adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3620         adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3621         adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3622
3623         /* Hardware workaround, gprc counts missed packets */
3624         adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3625         adapter->stats.pf.gprc -= missed_rx;
3626
3627         if (hw->mac.type != ixgbe_mac_82598EB) {
3628                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3629                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3630                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3631                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3632                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3633                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3634                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3635                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3636         } else {
3637                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3638                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3639                 /* 82598 only has a counter in the high register */
3640                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3641                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3642                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3643         }
3644
3645         /*
3646          * Workaround: mprc hardware is incorrectly counting
3647          * broadcasts, so for now we subtract those.
3648          */
3649         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3650         adapter->stats.pf.bprc += bprc;
3651         adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3652         if (hw->mac.type == ixgbe_mac_82598EB)
3653                 adapter->stats.pf.mprc -= bprc;
3654
3655         adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3656         adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3657         adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3658         adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3659         adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3660         adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3661
3662         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3663         adapter->stats.pf.lxontxc += lxon;
3664         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3665         adapter->stats.pf.lxofftxc += lxoff;
3666         total = lxon + lxoff;
3667
3668         adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3669         adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3670         adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3671         adapter->stats.pf.gptc -= total;
3672         adapter->stats.pf.mptc -= total;
3673         adapter->stats.pf.ptc64 -= total;
3674         adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3675
3676         adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3677         adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3678         adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3679         adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3680         adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3681         adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3682         adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3683         adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3684         adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3685         adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3686         adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3687         adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3688         adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3689         adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3690         adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3691         adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3692         adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3693         adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3694         /* Only read FCOE on 82599 */
3695         if (hw->mac.type != ixgbe_mac_82598EB) {
3696                 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3697                 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3698                 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3699                 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3700                 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3701         }
3702
3703         /* Fill out the OS statistics structure */
3704         IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3705         IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3706         IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3707         IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3708         IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3709         IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3710         IXGBE_SET_COLLISIONS(adapter, 0);
3711         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3712         IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3713             + adapter->stats.pf.rlec);
3714 }
3715
3716 #if __FreeBSD_version >= 1100036
3717 static uint64_t
3718 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3719 {
3720         struct adapter *adapter;
3721         struct tx_ring *txr;
3722         uint64_t rv;
3723
3724         adapter = if_getsoftc(ifp);
3725
3726         switch (cnt) {
3727         case IFCOUNTER_IPACKETS:
3728                 return (adapter->ipackets);
3729         case IFCOUNTER_OPACKETS:
3730                 return (adapter->opackets);
3731         case IFCOUNTER_IBYTES:
3732                 return (adapter->ibytes);
3733         case IFCOUNTER_OBYTES:
3734                 return (adapter->obytes);
3735         case IFCOUNTER_IMCASTS:
3736                 return (adapter->imcasts);
3737         case IFCOUNTER_OMCASTS:
3738                 return (adapter->omcasts);
3739         case IFCOUNTER_COLLISIONS:
3740                 return (0);
3741         case IFCOUNTER_IQDROPS:
3742                 return (adapter->iqdrops);
3743         case IFCOUNTER_OQDROPS:
3744                 rv = 0;
3745                 txr = adapter->tx_rings;
3746                 for (int i = 0; i < adapter->num_queues; i++, txr++)
3747                         rv += txr->br->br_drops;
3748                 return (rv);
3749         case IFCOUNTER_IERRORS:
3750                 return (adapter->ierrors);
3751         default:
3752                 return (if_get_counter_default(ifp, cnt));
3753         }
3754 }
3755 #endif
3756
3757 /** ixgbe_sysctl_tdh_handler - Handler function
3758  *  Retrieves the TDH value from the hardware
3759  */
3760 static int 
3761 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3762 {
3763         int error;
3764
3765         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3766         if (!txr) return 0;
3767
3768         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3769         error = sysctl_handle_int(oidp, &val, 0, req);
3770         if (error || !req->newptr)
3771                 return error;
3772         return 0;
3773 }
3774
3775 /** ixgbe_sysctl_tdt_handler - Handler function
3776  *  Retrieves the TDT value from the hardware
3777  */
3778 static int 
3779 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3780 {
3781         int error;
3782
3783         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3784         if (!txr) return 0;
3785
3786         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3787         error = sysctl_handle_int(oidp, &val, 0, req);
3788         if (error || !req->newptr)
3789                 return error;
3790         return 0;
3791 }
3792
3793 /** ixgbe_sysctl_rdh_handler - Handler function
3794  *  Retrieves the RDH value from the hardware
3795  */
3796 static int 
3797 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3798 {
3799         int error;
3800
3801         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3802         if (!rxr) return 0;
3803
3804         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3805         error = sysctl_handle_int(oidp, &val, 0, req);
3806         if (error || !req->newptr)
3807                 return error;
3808         return 0;
3809 }
3810
3811 /** ixgbe_sysctl_rdt_handler - Handler function
3812  *  Retrieves the RDT value from the hardware
3813  */
3814 static int 
3815 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3816 {
3817         int error;
3818
3819         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3820         if (!rxr) return 0;
3821
3822         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3823         error = sysctl_handle_int(oidp, &val, 0, req);
3824         if (error || !req->newptr)
3825                 return error;
3826         return 0;
3827 }
3828
3829 static int
3830 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3831 {
3832         int error;
3833         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3834         unsigned int reg, usec, rate;
3835
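        /*
         * Report the current rate by converting the EITR interval field
         * back into interrupts/second; writing the sysctl reverses the
         * conversion and also updates the driver-wide default
         * (ixgbe_max_interrupt_rate).
         */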
3836         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3837         usec = ((reg & 0x0FF8) >> 3);
3838         if (usec > 0)
3839                 rate = 500000 / usec;
3840         else
3841                 rate = 0;
3842         error = sysctl_handle_int(oidp, &rate, 0, req);
3843         if (error || !req->newptr)
3844                 return error;
3845         reg &= ~0xfff; /* default, no limitation */
3846         ixgbe_max_interrupt_rate = 0;
3847         if (rate > 0 && rate < 500000) {
3848                 if (rate < 1000)
3849                         rate = 1000;
3850                 ixgbe_max_interrupt_rate = rate;
3851                 reg |= ((4000000/rate) & 0xff8 );
3852         }
3853         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3854         return 0;
3855 }
3856
3857 static void
3858 ixgbe_add_device_sysctls(struct adapter *adapter)
3859 {
3860         device_t dev = adapter->dev;
3861         struct ixgbe_hw *hw = &adapter->hw;
3862         struct sysctl_oid_list *child;
3863         struct sysctl_ctx_list *ctx;
3864
3865         ctx = device_get_sysctl_ctx(dev);
3866         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3867
3868         /* Sysctls for all devices */
3869         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3870                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3871                         ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3872
3873         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3874                         CTLFLAG_RW,
3875                         &ixgbe_enable_aim, 1, "Interrupt Moderation");
3876
3877         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3878                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3879                         ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3880
3881         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3882                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3883                         ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3884
3885         /* for X550 devices */
3886         if (hw->mac.type >= ixgbe_mac_X550)
3887                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3888                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3889                                 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3890
3891         /* for X550T and X550EM backplane devices */
3892         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3893             hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3894                 struct sysctl_oid *eee_node;
3895                 struct sysctl_oid_list *eee_list;
3896
3897                 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3898                                            CTLFLAG_RD, NULL,
3899                                            "Energy Efficient Ethernet sysctls");
3900                 eee_list = SYSCTL_CHILDREN(eee_node);
3901
3902                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3903                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3904                                 ixgbe_sysctl_eee_enable, "I",
3905                                 "Enable or Disable EEE");
3906
3907                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3908                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3909                                 ixgbe_sysctl_eee_negotiated, "I",
3910                                 "EEE negotiated on link");
3911
3912                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3913                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3914                                 ixgbe_sysctl_eee_tx_lpi_status, "I",
3915                                 "Whether or not TX link is in LPI state");
3916
3917                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3918                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3919                                 ixgbe_sysctl_eee_rx_lpi_status, "I",
3920                                 "Whether or not RX link is in LPI state");
3921         }
3922
3923         /* for certain 10GBaseT devices */
3924         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3925             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3926                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3927                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3928                                 ixgbe_sysctl_wol_enable, "I",
3929                                 "Enable/Disable Wake on LAN");
3930
3931                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3932                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3933                                 ixgbe_sysctl_wufc, "I",
3934                                 "Enable/Disable Wake Up Filters");
3935         }
3936
3937         /* for X550EM 10GBaseT devices */
3938         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3939                 struct sysctl_oid *phy_node;
3940                 struct sysctl_oid_list *phy_list;
3941
3942                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3943                                            CTLFLAG_RD, NULL,
3944                                            "External PHY sysctls");
3945                 phy_list = SYSCTL_CHILDREN(phy_node);
3946
3947                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3948                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3949                                 ixgbe_sysctl_phy_temp, "I",
3950                                 "Current External PHY Temperature (Celsius)");
3951
3952                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3953                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3954                                 ixgbe_sysctl_phy_overtemp_occurred, "I",
3955                                 "External PHY High Temperature Event Occurred");
3956         }
3957 }
3958
3959 /*
3960  * Add sysctl variables, one per statistic, to the system.
3961  */
3962 static void
3963 ixgbe_add_hw_stats(struct adapter *adapter)
3964 {
3965         device_t dev = adapter->dev;
3966
3967         struct tx_ring *txr = adapter->tx_rings;
3968         struct rx_ring *rxr = adapter->rx_rings;
3969
3970         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3971         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3972         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3973         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3974
3975         struct sysctl_oid *stat_node, *queue_node;
3976         struct sysctl_oid_list *stat_list, *queue_list;
3977
3978 #define QUEUE_NAME_LEN 32
3979         char namebuf[QUEUE_NAME_LEN];
3980
3981         /* Driver Statistics */
3982         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3983                         CTLFLAG_RD, &adapter->dropped_pkts,
3984                         "Driver dropped packets");
3985         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3986                         CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3987                         "m_defrag() failed");
3988         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3989                         CTLFLAG_RD, &adapter->watchdog_events,
3990                         "Watchdog timeouts");
3991         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3992                         CTLFLAG_RD, &adapter->link_irq,
3993                         "Link MSIX IRQ Handled");
3994
3995         for (int i = 0; i < adapter->num_queues; i++, txr++) {
3996                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3997                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3998                                             CTLFLAG_RD, NULL, "Queue Name");
3999                 queue_list = SYSCTL_CHILDREN(queue_node);
4000
4001                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4002                                 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4003                                 sizeof(&adapter->queues[i]),
4004                                 ixgbe_sysctl_interrupt_rate_handler, "IU",
4005                                 "Interrupt Rate");
4006                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4007                                 CTLFLAG_RD, &(adapter->queues[i].irqs),
4008                                 "irqs on this queue");
4009                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
4010                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4011                                 ixgbe_sysctl_tdh_handler, "IU",
4012                                 "Transmit Descriptor Head");
4013                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
4014                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4015                                 ixgbe_sysctl_tdt_handler, "IU",
4016                                 "Transmit Descriptor Tail");
4017                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4018                                 CTLFLAG_RD, &txr->tso_tx,
4019                                 "TSO");
4020                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4021                                 CTLFLAG_RD, &txr->no_tx_dma_setup,
4022                                 "Driver tx dma failure in xmit");
4023                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4024                                 CTLFLAG_RD, &txr->no_desc_avail,
4025                                 "Queue No Descriptor Available");
4026                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4027                                 CTLFLAG_RD, &txr->total_packets,
4028                                 "Queue Packets Transmitted");
4029                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4030                                 CTLFLAG_RD, &txr->br->br_drops,
4031                                 "Packets dropped in buf_ring");
4032         }
4033
4034         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4035                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4036                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
4037                                             CTLFLAG_RD, NULL, "Queue Name");
4038                 queue_list = SYSCTL_CHILDREN(queue_node);
4039
4040                 struct lro_ctrl *lro = &rxr->lro;
4046
4047                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
4048                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4049                                 ixgbe_sysctl_rdh_handler, "IU",
4050                                 "Receive Descriptor Head");
4051                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
4052                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4053                                 ixgbe_sysctl_rdt_handler, "IU",
4054                                 "Receive Descriptor Tail");
4055                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4056                                 CTLFLAG_RD, &rxr->rx_packets,
4057                                 "Queue Packets Received");
4058                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4059                                 CTLFLAG_RD, &rxr->rx_bytes,
4060                                 "Queue Bytes Received");
4061                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4062                                 CTLFLAG_RD, &rxr->rx_copies,
4063                                 "Copied RX Frames");
4064                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4065                                 CTLFLAG_RD, &lro->lro_queued, 0,
4066                                 "LRO Queued");
4067                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4068                                 CTLFLAG_RD, &lro->lro_flushed, 0,
4069                                 "LRO Flushed");
4070         }
4071
4072         /* MAC stats get their own sub node */
4073
4074         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4075                                     CTLFLAG_RD, NULL, "MAC Statistics");
4076         stat_list = SYSCTL_CHILDREN(stat_node);
4077
4078         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4079                         CTLFLAG_RD, &stats->crcerrs,
4080                         "CRC Errors");
4081         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4082                         CTLFLAG_RD, &stats->illerrc,
4083                         "Illegal Byte Errors");
4084         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4085                         CTLFLAG_RD, &stats->errbc,
4086                         "Byte Errors");
4087         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4088                         CTLFLAG_RD, &stats->mspdc,
4089                         "MAC Short Packets Discarded");
4090         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4091                         CTLFLAG_RD, &stats->mlfc,
4092                         "MAC Local Faults");
4093         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4094                         CTLFLAG_RD, &stats->mrfc,
4095                         "MAC Remote Faults");
4096         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4097                         CTLFLAG_RD, &stats->rlec,
4098                         "Receive Length Errors");
4099
4100         /* Flow Control stats */
4101         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4102                         CTLFLAG_RD, &stats->lxontxc,
4103                         "Link XON Transmitted");
4104         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4105                         CTLFLAG_RD, &stats->lxonrxc,
4106                         "Link XON Received");
4107         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4108                         CTLFLAG_RD, &stats->lxofftxc,
4109                         "Link XOFF Transmitted");
4110         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4111                         CTLFLAG_RD, &stats->lxoffrxc,
4112                         "Link XOFF Received");
4113
4114         /* Packet Reception Stats */
4115         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4116                         CTLFLAG_RD, &stats->tor, 
4117                         "Total Octets Received"); 
4118         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4119                         CTLFLAG_RD, &stats->gorc, 
4120                         "Good Octets Received"); 
4121         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4122                         CTLFLAG_RD, &stats->tpr,
4123                         "Total Packets Received");
4124         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4125                         CTLFLAG_RD, &stats->gprc,
4126                         "Good Packets Received");
4127         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4128                         CTLFLAG_RD, &stats->mprc,
4129                         "Multicast Packets Received");
4130         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4131                         CTLFLAG_RD, &stats->bprc,
4132                         "Broadcast Packets Received");
4133         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4134                         CTLFLAG_RD, &stats->prc64,
4135                         "64 byte frames received");
4136         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4137                         CTLFLAG_RD, &stats->prc127,
4138                         "65-127 byte frames received");
4139         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4140                         CTLFLAG_RD, &stats->prc255,
4141                         "128-255 byte frames received");
4142         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4143                         CTLFLAG_RD, &stats->prc511,
4144                         "256-511 byte frames received");
4145         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4146                         CTLFLAG_RD, &stats->prc1023,
4147                         "512-1023 byte frames received");
4148         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4149                         CTLFLAG_RD, &stats->prc1522,
4150                         "1024-1522 byte frames received");
4151         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4152                         CTLFLAG_RD, &stats->ruc,
4153                         "Receive Undersized");
4154         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4155                         CTLFLAG_RD, &stats->rfc,
4156                         "Fragmented Packets Received");
4157         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4158                         CTLFLAG_RD, &stats->roc,
4159                         "Oversized Packets Received");
4160         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4161                         CTLFLAG_RD, &stats->rjc,
4162                         "Received Jabber");
4163         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4164                         CTLFLAG_RD, &stats->mngprc,
4165                         "Management Packets Received");
4166         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4167                         CTLFLAG_RD, &stats->mngpdc,
4168                         "Management Packets Dropped");
4169         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4170                         CTLFLAG_RD, &stats->xec,
4171                         "Checksum Errors");
4172
4173         /* Packet Transmission Stats */
4174         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4175                         CTLFLAG_RD, &stats->gotc, 
4176                         "Good Octets Transmitted"); 
4177         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4178                         CTLFLAG_RD, &stats->tpt,
4179                         "Total Packets Transmitted");
4180         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4181                         CTLFLAG_RD, &stats->gptc,
4182                         "Good Packets Transmitted");
4183         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4184                         CTLFLAG_RD, &stats->bptc,
4185                         "Broadcast Packets Transmitted");
4186         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4187                         CTLFLAG_RD, &stats->mptc,
4188                         "Multicast Packets Transmitted");
4189         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4190                         CTLFLAG_RD, &stats->mngptc,
4191                         "Management Packets Transmitted");
4192         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4193                         CTLFLAG_RD, &stats->ptc64,
4194                         "64 byte frames transmitted");
4195         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4196                         CTLFLAG_RD, &stats->ptc127,
4197                         "65-127 byte frames transmitted");
4198         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4199                         CTLFLAG_RD, &stats->ptc255,
4200                         "128-255 byte frames transmitted");
4201         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4202                         CTLFLAG_RD, &stats->ptc511,
4203                         "256-511 byte frames transmitted");
4204         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4205                         CTLFLAG_RD, &stats->ptc1023,
4206                         "512-1023 byte frames transmitted");
4207         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4208                         CTLFLAG_RD, &stats->ptc1522,
4209                         "1024-1522 byte frames transmitted");
4210 }
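
/*
** Illustrative sketch (not part of the driver): the statistics OIDs
** registered above can be read from a userland program with
** sysctlbyname(3).  The OID path "dev.ix.0.mac_stats.crc_errs" is an
** assumed example for the first ix(4) instance.
**
**      #include <sys/types.h>
**      #include <sys/sysctl.h>
**      #include <stdint.h>
**      #include <stdio.h>
**
**      uint64_t crc;
**      size_t len = sizeof(crc);
**
**      if (sysctlbyname("dev.ix.0.mac_stats.crc_errs", &crc, &len,
**          NULL, 0) == 0)
**              printf("CRC errors: %ju\n", (uintmax_t)crc);
*/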
4211
4212 /*
4213 ** Set flow control using sysctl:
4214 ** Flow control values:
4215 **      0 - off
4216 **      1 - rx pause
4217 **      2 - tx pause
4218 **      3 - full
4219 */
4220 static int
4221 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4222 {
4223         int error, last;
4224         struct adapter *adapter = (struct adapter *) arg1;
4225
4226         last = adapter->fc;
4227         error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4228         if ((error) || (req->newptr == NULL))
4229                 return (error);
4230
4231         /* Don't bother if it's not changed */
4232         if (adapter->fc == last)
4233                 return (0);
4234
4235         switch (adapter->fc) {
4236                 case ixgbe_fc_rx_pause:
4237                 case ixgbe_fc_tx_pause:
4238                 case ixgbe_fc_full:
4239                         adapter->hw.fc.requested_mode = adapter->fc;
4240                         if (adapter->num_queues > 1)
4241                                 ixgbe_disable_rx_drop(adapter);
4242                         break;
4243                 case ixgbe_fc_none:
4244                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
4245                         if (adapter->num_queues > 1)
4246                                 ixgbe_enable_rx_drop(adapter);
4247                         break;
4248                 default:
4249                         adapter->fc = last;
4250                         return (EINVAL);
4251         }
4252         /* Don't autoneg if forcing a value */
4253         adapter->hw.fc.disable_fc_autoneg = TRUE;
4254         ixgbe_fc_enable(&adapter->hw);
4255         return (error);
4256 }
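
/*
** Illustrative sketch (not part of the driver): assuming this handler
** is attached as the "fc" node under the device's sysctl tree (the
** OID path "dev.ix.0.fc" is an assumption), the flow control mode
** could be changed from userland like this:
**
**      #include <sys/types.h>
**      #include <sys/sysctl.h>
**
**      int fc = 3;     value 3 requests "full" per the table above
**      sysctlbyname("dev.ix.0.fc", NULL, NULL, &fc, sizeof(fc));
*/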
4257
4258 /*
4259 ** Control advertised link speed:
4260 **      Flags:
4261 **      0x1 - advertise 100 Mb
4262 **      0x2 - advertise 1G
4263 **      0x4 - advertise 10G
4264 */
4265 static int
4266 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4267 {
4268         int                     error = 0, requested;
4269         struct adapter          *adapter;
4270         device_t                dev;
4271         struct ixgbe_hw         *hw;
4272         ixgbe_link_speed        speed = 0;
4273
4274         adapter = (struct adapter *) arg1;
4275         dev = adapter->dev;
4276         hw = &adapter->hw;
4277
4278         requested = adapter->advertise;
4279         error = sysctl_handle_int(oidp, &requested, 0, req);
4280         if ((error) || (req->newptr == NULL))
4281                 return (error);
4282
4283         /* Checks to validate new value */
4284         if (adapter->advertise == requested) /* no change */
4285                 return (0);
4286
4287         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4288             (hw->phy.multispeed_fiber))) {
4289                 device_printf(dev,
4290                     "Advertised speed can only be set on copper or "
4291                     "multispeed fiber media types.\n");
4292                 return (EINVAL);
4293         }
4294
4295         if (requested < 0x1 || requested > 0x7) {
4296                 device_printf(dev,
4297                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4298                 return (EINVAL);
4299         }
4300
4301         if ((requested & 0x1)
4302             && (hw->mac.type != ixgbe_mac_X540)
4303             && (hw->mac.type != ixgbe_mac_X550)) {
4304                 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4305                 return (EINVAL);
4306         }
4307
4308         /* Set new value and report new advertised mode */
4309         if (requested & 0x1)
4310                 speed |= IXGBE_LINK_SPEED_100_FULL;
4311         if (requested & 0x2)
4312                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4313         if (requested & 0x4)
4314                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4315
4316         hw->mac.autotry_restart = TRUE;
4317         hw->mac.ops.setup_link(hw, speed, TRUE);
4318         adapter->advertise = requested;
4319
4320         return (error);
4321 }
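
/*
** Illustrative sketch (not part of the driver): the advertise flags
** above combine as a bitmask, e.g. 0x2 | 0x4 = 0x6 advertises both
** 1G and 10G.  Assuming the handler is exposed as "advertise_speed"
** under dev.ix.0 (an assumed OID path), it could be set like this:
**
**      #include <sys/types.h>
**      #include <sys/sysctl.h>
**
**      int adv = 0x2 | 0x4;    request 1G and 10G
**      sysctlbyname("dev.ix.0.advertise_speed", NULL, NULL,
**          &adv, sizeof(adv));
*/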
4322
4323 /*
4324  * The following two sysctls are for X550 BaseT devices;
4325  * they deal with the external PHY used in them.
4326  */
4327 static int
4328 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4329 {
4330         struct adapter  *adapter = (struct adapter *) arg1;
4331         struct ixgbe_hw *hw = &adapter->hw;
4332         u16 reg;
4333
4334         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4335                 device_printf(adapter->dev,
4336                     "Device has no supported external thermal sensor.\n");
4337                 return (ENODEV);
4338         }
4339
4340         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4341                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4342                                       &reg)) {
4343                 device_printf(adapter->dev,
4344                     "Error reading from PHY's current temperature register\n");
4345                 return (EAGAIN);
4346         }
4347
4348         /* Shift temp for output */
4349         reg = reg >> 8;
4350
4351         return (sysctl_handle_int(oidp, NULL, reg, req));
4352 }
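
/*
** Illustrative sketch (not part of the driver): the handler above
** returns the upper byte of the PHY's current-temperature register.
** Assuming it is exposed as "phy_temp" under dev.ix.0 (an assumed
** OID path), it could be read like this:
**
**      #include <sys/types.h>
**      #include <sys/sysctl.h>
**      #include <stdio.h>
**
**      int temp;
**      size_t len = sizeof(temp);
**
**      if (sysctlbyname("dev.ix.0.phy_temp", &temp, &len, NULL, 0) == 0)
**              printf("PHY temperature register (high byte): %d\n", temp);
*/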
4353
4354 /*
4355  * Reports whether the current PHY temperature is over
4356  * the overtemp threshold.
4357  *  - This is reported directly from the PHY
4358  */
4359 static int
4360 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4361 {
4362         struct adapter  *adapter = (struct adapter *) arg1;
4363         struct ixgbe_hw *hw = &adapter->hw;
4364         u16 reg;
4365
4366         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4367                 device_printf(adapter->dev,
4368                     "Device has no supported external thermal sensor.\n");
4369                 return (ENODEV);
4370         }
4371
4372         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4373                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4374                                       &reg)) {
4375                 device_printf(adapter->dev,
4376                     "Error reading from PHY's temperature status register\n");
4377                 return (EAGAIN);
4378         }
4379
4380         /* Get occurrence bit */
4381         reg = !!(reg & 0x4000);
4382         return (sysctl_handle_int(oidp, 0, reg, req));
4383 }
4384
4385 /*
4386 ** Thermal Shutdown Trigger (internal MAC)
4387 **   - Set this to 1 to cause an overtemp event to occur
4388 */
4389 static int
4390 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4391 {
4392         struct adapter  *adapter = (struct adapter *) arg1;
4393         struct ixgbe_hw *hw = &adapter->hw;
4394         int error, fire = 0;
4395
4396         error = sysctl_handle_int(oidp, &fire, 0, req);
4397         if ((error) || (req->newptr == NULL))
4398                 return (error);
4399
4400         if (fire) {
4401                 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4402                 reg |= IXGBE_EICR_TS;
4403                 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4404         }
4405
4406         return (0);
4407 }
4408
4409 /*
4410 ** Manage DMA Coalescing.
4411 ** Control values:
4412 **      0/1 - off / on (use default value of 1000)
4413 **
4414 **      Legal timer values are:
4415 **      50, 100, 250, 500, 1000, 2000, 5000, 10000
4416 **
4417 **      Turning off interrupt moderation will also turn this off.
4418 */
4419 static int
4420 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4421 {
4422         struct adapter *adapter = (struct adapter *) arg1;
4423         struct ixgbe_hw *hw = &adapter->hw;
4424         struct ifnet *ifp = adapter->ifp;
4425         int             error;
4426         u16             oldval;
4427
4428         oldval = adapter->dmac;
4429         error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4430         if ((error) || (req->newptr == NULL))
4431                 return (error);
4432
4433         switch (hw->mac.type) {
4434         case ixgbe_mac_X550:
4435         case ixgbe_mac_X550EM_x:
4436                 break;
4437         default:
4438                 device_printf(adapter->dev,
4439                     "DMA Coalescing is only supported on X550 devices\n");
4440                 return (ENODEV);
4441         }
4442
4443         switch (adapter->dmac) {
4444         case 0:
4445                 /* Disabled */
4446                 break;
4447         case 1: /* Enable and use default */
4448                 adapter->dmac = 1000;
4449                 break;
4450         case 50:
4451         case 100:
4452         case 250:
4453         case 500:
4454         case 1000:
4455         case 2000:
4456         case 5000:
4457         case 10000:
4458                 /* Legal values - allow */
4459                 break;
4460         default:
4461                 /* Illegal value, restore the previous setting */
4462                 adapter->dmac = oldval;
4463                 return (EINVAL);
4464         }
4465
4466         /* Re-initialize hardware if it's already running */
4467         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4468                 ixgbe_init(adapter);
4469
4470         return (0);
4471 }
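
/*
** Illustrative sketch (not part of the driver): assuming the handler
** above is exposed as "dmac" under dev.ix.0 (an assumed OID path),
** DMA coalescing could be enabled with one of the legal timer values
** listed above, e.g. 250:
**
**      #include <sys/types.h>
**      #include <sys/sysctl.h>
**
**      int dmac = 250;         must be one of the legal values above
**      sysctlbyname("dev.ix.0.dmac", NULL, NULL, &dmac, sizeof(dmac));
*/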
4472
4473 /*
4474  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4475  * Values:
4476  *      0 - disabled
4477  *      1 - enabled
4478  */
4479 static int
4480 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4481 {
4482         struct adapter *adapter = (struct adapter *) arg1;
4483         struct ixgbe_hw *hw = &adapter->hw;
4484         int new_wol_enabled;
4485         int error = 0;
4486
4487         new_wol_enabled = hw->wol_enabled;
4488         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4489         if ((error) || (req->newptr == NULL))
4490                 return (error);
4491         if (new_wol_enabled == hw->wol_enabled)
4492                 return (0);
4493
4494         if (new_wol_enabled > 0 && !adapter->wol_support)
4495                 return (ENODEV);
4496         else
4497                 hw->wol_enabled = !!(new_wol_enabled);
4498
4499         return (0);
4500 }
4501
4502 /*
4503  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4504  * if supported by the adapter.
4505  * Values:
4506  *      0 - disabled
4507  *      1 - enabled
4508  */
4509 static int
4510 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4511 {
4512         struct adapter *adapter = (struct adapter *) arg1;
4513         struct ifnet *ifp = adapter->ifp;
4514         int new_eee_enabled, error = 0;
4515
4516         new_eee_enabled = adapter->eee_enabled;
4517         error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4518         if ((error) || (req->newptr == NULL))
4519                 return (error);
4520         if (new_eee_enabled == adapter->eee_enabled)
4521                 return (0);
4522
4523         if (new_eee_enabled > 0 && !adapter->eee_support)
4524                 return (ENODEV);
4525         else
4526                 adapter->eee_enabled = !!(new_eee_enabled);
4527
4528         /* Re-initialize hardware if it's already running */
4529         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4530                 ixgbe_init(adapter);
4531
4532         return (0);
4533 }
4534
4535 /*
4536  * Read-only sysctl indicating whether EEE support was negotiated
4537  * on the link.
4538  */
4539 static int
4540 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4541 {
4542         struct adapter *adapter = (struct adapter *) arg1;
4543         struct ixgbe_hw *hw = &adapter->hw;
4544         bool status;
4545
4546         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4547
4548         return (sysctl_handle_int(oidp, 0, status, req));
4549 }
4550
4551 /*
4552  * Read-only sysctl indicating whether RX Link is in LPI state.
4553  */
4554 static int
4555 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4556 {
4557         struct adapter *adapter = (struct adapter *) arg1;
4558         struct ixgbe_hw *hw = &adapter->hw;
4559         bool status;
4560
4561         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4562             IXGBE_EEE_RX_LPI_STATUS);
4563
4564         return (sysctl_handle_int(oidp, 0, status, req));
4565 }
4566
4567 /*
4568  * Read-only sysctl indicating whether TX Link is in LPI state.
4569  */
4570 static int
4571 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4572 {
4573         struct adapter *adapter = (struct adapter *) arg1;
4574         struct ixgbe_hw *hw = &adapter->hw;
4575         bool status;
4576
4577         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4578             IXGBE_EEE_TX_LPI_STATUS);
4579
4580         return (sysctl_handle_int(oidp, 0, status, req));
4581 }
4582
4583 /*
4584  * Sysctl to enable/disable the types of packets that the
4585  * adapter will wake up on upon receipt.
4586  * WUFC - Wake Up Filter Control
4587  * Flags:
4588  *      0x1  - Link Status Change
4589  *      0x2  - Magic Packet
4590  *      0x4  - Direct Exact
4591  *      0x8  - Directed Multicast
4592  *      0x10 - Broadcast
4593  *      0x20 - ARP/IPv4 Request Packet
4594  *      0x40 - Direct IPv4 Packet
4595  *      0x80 - Direct IPv6 Packet
4596  *
4597  * Setting any other flag will cause the sysctl to return an
4598  * error.
4599  */
4600 static int
4601 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4602 {
4603         struct adapter *adapter = (struct adapter *) arg1;
4604         int error = 0;
4605         u32 new_wufc;
4606
4607         new_wufc = adapter->wufc;
4608
4609         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4610         if ((error) || (req->newptr == NULL))
4611                 return (error);
4612         if (new_wufc == adapter->wufc)
4613                 return (0);
4614
4615         if (new_wufc & 0xffffff00)
4616                 return (EINVAL);
4617         else {
4618                 new_wufc &= 0xff;
4619                 new_wufc |= (0xffffff00 & adapter->wufc);
4620                 adapter->wufc = new_wufc;
4621         }
4622
4623         return (0);
4624 }
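
/*
** Illustrative sketch (not part of the driver): the WUFC flags above
** combine as a bitmask, e.g. 0x1 | 0x2 = 0x3 wakes on a link status
** change or a magic packet.  Assuming the handler is exposed as
** "wufc" under dev.ix.0 (an assumed OID path):
**
**      #include <sys/types.h>
**      #include <sys/sysctl.h>
**
**      int wufc = 0x1 | 0x2;   link change + magic packet
**      sysctlbyname("dev.ix.0.wufc", NULL, NULL, &wufc, sizeof(wufc));
*/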
4625
4626 /*
4627 ** Enable the hardware to drop packets when the buffer is
4628 ** full. This is useful with multiqueue, so that no single
4629 ** queue being full stalls the entire RX engine. We only
4630 ** enable this when multiqueue is in use AND Flow Control is
4631 ** disabled.
4632 */
4633 static void
4634 ixgbe_enable_rx_drop(struct adapter *adapter)
4635 {
4636         struct ixgbe_hw *hw = &adapter->hw;
4637
4638         for (int i = 0; i < adapter->num_queues; i++) {
4639                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4640                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4641                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4642         }
4643 }
4644
4645 static void
4646 ixgbe_disable_rx_drop(struct adapter *adapter)
4647 {
4648         struct ixgbe_hw *hw = &adapter->hw;
4649
4650         for (int i = 0; i < adapter->num_queues; i++) {
4651                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4652                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4653                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4654         }
4655 }
4656
4657 static void
4658 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4659 {
4660         u32 mask;
4661
4662         switch (adapter->hw.mac.type) {
4663         case ixgbe_mac_82598EB:
4664                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4665                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4666                 break;
4667         case ixgbe_mac_82599EB:
4668         case ixgbe_mac_X540:
4669         case ixgbe_mac_X550:
4670         case ixgbe_mac_X550EM_x:
4671                 mask = (queues & 0xFFFFFFFF);
4672                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4673                 mask = (queues >> 32);
4674                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4675                 break;
4676         default:
4677                 break;
4678         }
4679 }
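
/*
** Worked example (for illustration): on 82599/X540/X550 the 64-bit
** "queues" bitmap is split across the two extended cause-set
** registers, so bit N of "queues" maps to bit (N % 32) of
** IXGBE_EICS_EX(N / 32).  For instance:
**
**      queues = (1ULL << 3);   sets bit 3 in IXGBE_EICS_EX(0)
**      queues = (1ULL << 35);  sets bit 3 in IXGBE_EICS_EX(1)
*/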
4680
4681