1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /*********************************************************************
44  *  Set this to one to display debug statistics
45  *********************************************************************/
46 int             ixgbe_display_debug_stats = 0;
47
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixgbe_driver_version[] = "2.8.3";
52
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixgbe_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
64 {
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95         /* required last entry */
96         {0, 0, 0, 0, 0}
97 };
98
99 /*********************************************************************
100  *  Table of branding strings
101  *********************************************************************/
102
103 static char    *ixgbe_strings[] = {
104         "Intel(R) PRO/10GbE PCI-Express Network Driver"
105 };
106
107 /*********************************************************************
108  *  Function prototypes
109  *********************************************************************/
110 static int      ixgbe_probe(device_t);
111 static int      ixgbe_attach(device_t);
112 static int      ixgbe_detach(device_t);
113 static int      ixgbe_shutdown(device_t);
114 static int      ixgbe_suspend(device_t);
115 static int      ixgbe_resume(device_t);
116 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void     ixgbe_init(void *);
118 static void     ixgbe_init_locked(struct adapter *);
119 static void     ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
122 #endif
123 static void     ixgbe_add_media_types(struct adapter *);
124 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int      ixgbe_media_change(struct ifnet *);
126 static void     ixgbe_identify_hardware(struct adapter *);
127 static int      ixgbe_allocate_pci_resources(struct adapter *);
128 static void     ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int      ixgbe_allocate_msix(struct adapter *);
130 static int      ixgbe_allocate_legacy(struct adapter *);
131 static int      ixgbe_setup_msix(struct adapter *);
132 static void     ixgbe_free_pci_resources(struct adapter *);
133 static void     ixgbe_local_timer(void *);
134 static int      ixgbe_setup_interface(device_t, struct adapter *);
135 static void     ixgbe_config_dmac(struct adapter *);
136 static void     ixgbe_config_delay_values(struct adapter *);
137 static void     ixgbe_config_link(struct adapter *);
138 static void     ixgbe_check_eee_support(struct adapter *);
139 static void     ixgbe_check_wol_support(struct adapter *);
140 static int      ixgbe_setup_low_power_mode(struct adapter *);
141 static void     ixgbe_rearm_queues(struct adapter *, u64);
142
143 static void     ixgbe_initialize_transmit_units(struct adapter *);
144 static void     ixgbe_initialize_receive_units(struct adapter *);
145 static void     ixgbe_enable_rx_drop(struct adapter *);
146 static void     ixgbe_disable_rx_drop(struct adapter *);
147
148 static void     ixgbe_enable_intr(struct adapter *);
149 static void     ixgbe_disable_intr(struct adapter *);
150 static void     ixgbe_update_stats_counters(struct adapter *);
151 static void     ixgbe_set_promisc(struct adapter *);
152 static void     ixgbe_set_multi(struct adapter *);
153 static void     ixgbe_update_link_status(struct adapter *);
154 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void     ixgbe_configure_ivars(struct adapter *);
156 static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
157
158 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
161
162 static void     ixgbe_add_device_sysctls(struct adapter *);
163 static void     ixgbe_add_hw_stats(struct adapter *);
164
165 /* Sysctl handlers */
166 static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int      ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int      ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int      ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int      ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int      ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
178
179 /* Support for pluggable optic modules */
180 static bool     ixgbe_sfp_probe(struct adapter *);
181 static void     ixgbe_setup_optics(struct adapter *);
182
183 /* Legacy (single vector) interrupt handler */
184 static void     ixgbe_legacy_irq(void *);
185
186 /* The MSI/X Interrupt handlers */
187 static void     ixgbe_msix_que(void *);
188 static void     ixgbe_msix_link(void *);
189
190 /* Deferred interrupt tasklets */
191 static void     ixgbe_handle_que(void *, int);
192 static void     ixgbe_handle_link(void *, int);
193 static void     ixgbe_handle_msf(void *, int);
194 static void     ixgbe_handle_mod(void *, int);
195 static void     ixgbe_handle_phy(void *, int);
196
197 #ifdef IXGBE_FDIR
198 static void     ixgbe_reinit_fdir(void *, int);
199 #endif
200
201 /*********************************************************************
202  *  FreeBSD Device Interface Entry Points
203  *********************************************************************/
204
205 static device_method_t ix_methods[] = {
206         /* Device interface */
207         DEVMETHOD(device_probe, ixgbe_probe),
208         DEVMETHOD(device_attach, ixgbe_attach),
209         DEVMETHOD(device_detach, ixgbe_detach),
210         DEVMETHOD(device_shutdown, ixgbe_shutdown),
211         DEVMETHOD(device_suspend, ixgbe_suspend),
212         DEVMETHOD(device_resume, ixgbe_resume),
213         DEVMETHOD_END
214 };
215
216 static driver_t ix_driver = {
217         "ix", ix_methods, sizeof(struct adapter),
218 };
219
220 devclass_t ix_devclass;
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
222
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
225
226 /*
227 ** TUNEABLE PARAMETERS:
228 */
229
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231                    "IXGBE driver parameters");
232
233 /*
234 ** AIM: Adaptive Interrupt Moderation
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic for that interrupt vector
238 */
239 static int ixgbe_enable_aim = TRUE;
240 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
241     "Enable adaptive interrupt moderation");
242
243 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
244 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
245     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
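/*
** For reference: if IXGBE_LOW_LATENCY is 128 (its usual value in ixgbe.h),
** the default above works out to 4000000 / 128 = 31250 interrupts per
** second per vector (illustrative arithmetic only).
*/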
246
247 /* How many packets rxeof tries to clean at a time */
248 static int ixgbe_rx_process_limit = 256;
249 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
250 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
251     &ixgbe_rx_process_limit, 0,
252     "Maximum number of received packets to process at a time,"
253     "-1 means unlimited");
254
255 /* How many packets txeof tries to clean at a time */
256 static int ixgbe_tx_process_limit = 256;
257 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
258 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
259     &ixgbe_tx_process_limit, 0,
260     "Maximum number of sent packets to process at a time,"
261     "-1 means unlimited");
262
263 /*
264 ** Smart speed setting, default to on.
265 ** This only works as a compile-time option
266 ** right now since it is applied during attach;
267 ** set this to 'ixgbe_smart_speed_off' to
268 ** disable.
269 */
270 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
271
272 /*
273  * MSIX should be the default for best performance,
274  * but this allows it to be forced off for testing.
275  */
276 static int ixgbe_enable_msix = 1;
277 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
278     "Enable MSI-X interrupts");
279
280 /*
281  * Number of queues; can be set to 0,
282  * in which case it autoconfigures based on
283  * the number of CPUs, with a max of 8. This
284  * can be overridden manually here.
285  */
286 static int ixgbe_num_queues = 0;
287 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
288     "Number of queues to configure, 0 indicates autoconfigure");
289
290 /*
291 ** Number of TX descriptors per ring,
292 ** set higher than RX as this seems
293 ** to be the better performing choice.
294 */
295 static int ixgbe_txd = PERFORM_TXD;
296 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
297     "Number of transmit descriptors per queue");
298
299 /* Number of RX descriptors per ring */
300 static int ixgbe_rxd = PERFORM_RXD;
301 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
302     "Number of receive descriptors per queue");
303
304 /*
305 ** Setting this allows the use
306 ** of unsupported SFP+ modules; note that
307 ** if you do so, you are on your own :)
308 */
309 static int allow_unsupported_sfp = FALSE;
310 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
311
312 /* Keep a running tab on attached ports for sanity checks */
313 static int ixgbe_total_ports;
314
315 #ifdef IXGBE_FDIR
316 /* 
317 ** Flow Director actually 'steals'
318 ** part of the packet buffer as its
319 ** filter pool; this variable controls
320 ** how much it uses:
321 **  0 = 64K, 1 = 128K, 2 = 256K
322 */
323 static int fdir_pballoc = 1;
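/*
** For example, with the default fdir_pballoc = 1, ixgbe_init_locked()
** later computes a header room of 32 << 1 = 64 for setup_rxpba(),
** corresponding to the 128K filter-pool setting above (illustrative;
** see that code).
*/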
324 #endif
325
326 #ifdef DEV_NETMAP
327 /*
328  * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
329  * be a reference on how to implement netmap support in a driver.
330  * Additional comments are in ixgbe_netmap.h .
331  *
332  * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
333  * that extend the standard driver.
334  */
335 #include <dev/netmap/ixgbe_netmap.h>
336 #endif /* DEV_NETMAP */
337
338 /*********************************************************************
339  *  Device identification routine
340  *
341  *  ixgbe_probe determines if the driver should be loaded on
342  *  an adapter based on its PCI vendor/device ID.
343  *
344  *  return BUS_PROBE_DEFAULT on success, positive on failure
345  *********************************************************************/
346
347 static int
348 ixgbe_probe(device_t dev)
349 {
350         ixgbe_vendor_info_t *ent;
351
352         u16     pci_vendor_id = 0;
353         u16     pci_device_id = 0;
354         u16     pci_subvendor_id = 0;
355         u16     pci_subdevice_id = 0;
356         char    adapter_name[256];
357
358         INIT_DEBUGOUT("ixgbe_probe: begin");
359
360         pci_vendor_id = pci_get_vendor(dev);
361         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
362                 return (ENXIO);
363
364         pci_device_id = pci_get_device(dev);
365         pci_subvendor_id = pci_get_subvendor(dev);
366         pci_subdevice_id = pci_get_subdevice(dev);
367
368         ent = ixgbe_vendor_info_array;
369         while (ent->vendor_id != 0) {
370                 if ((pci_vendor_id == ent->vendor_id) &&
371                     (pci_device_id == ent->device_id) &&
372
373                     ((pci_subvendor_id == ent->subvendor_id) ||
374                      (ent->subvendor_id == 0)) &&
375
376                     ((pci_subdevice_id == ent->subdevice_id) ||
377                      (ent->subdevice_id == 0))) {
378                         sprintf(adapter_name, "%s, Version - %s",
379                                 ixgbe_strings[ent->index],
380                                 ixgbe_driver_version);
381                         device_set_desc_copy(dev, adapter_name);
382                         ++ixgbe_total_ports;
383                         return (BUS_PROBE_DEFAULT);
384                 }
385                 ent++;
386         }
387         return (ENXIO);
388 }
389
390 /*********************************************************************
391  *  Device initialization routine
392  *
393  *  The attach entry point is called when the driver is being loaded.
394  *  This routine identifies the type of hardware, allocates all resources
395  *  and initializes the hardware.
396  *
397  *  return 0 on success, positive on failure
398  *********************************************************************/
399
400 static int
401 ixgbe_attach(device_t dev)
402 {
403         struct adapter *adapter;
404         struct ixgbe_hw *hw;
405         int             error = 0;
406         u16             csum;
407         u32             ctrl_ext;
408
409         INIT_DEBUGOUT("ixgbe_attach: begin");
410
411         /* Allocate, clear, and link in our adapter structure */
412         adapter = device_get_softc(dev);
413         adapter->dev = adapter->osdep.dev = dev;
414         hw = &adapter->hw;
415
416         /* Core Lock Init*/
417         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
418
419         /* Set up the timer callout */
420         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
421
422         /* Determine hardware revision */
423         ixgbe_identify_hardware(adapter);
424
425         /* Do base PCI setup - map BAR0 */
426         if (ixgbe_allocate_pci_resources(adapter)) {
427                 device_printf(dev, "Allocation of PCI resources failed\n");
428                 error = ENXIO;
429                 goto err_out;
430         }
431
432         /* Do descriptor calc and sanity checks */
433         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
434             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
435                 device_printf(dev, "TXD config issue, using default!\n");
436                 adapter->num_tx_desc = DEFAULT_TXD;
437         } else
438                 adapter->num_tx_desc = ixgbe_txd;
439
440         /*
441         ** With many RX rings it is easy to exceed the
442         ** system mbuf allocation. Tuning nmbclusters
443         ** can alleviate this.
444         */
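        /*
        ** Illustrative arithmetic: with, say, 2048 RX descriptors per
        ** ring, 8 queues and 2 ports, the check below needs
        ** 2048 * 8 * 2 = 32768 clusters available (example values only).
        */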
445         if (nmbclusters > 0) {
446                 int s;
447                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
448                 if (s > nmbclusters) {
449                         device_printf(dev, "RX Descriptors exceed "
450                             "system mbuf max, using default instead!\n");
451                         ixgbe_rxd = DEFAULT_RXD;
452                 }
453         }
454
455         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
456             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
457                 device_printf(dev, "RXD config issue, using default!\n");
458                 adapter->num_rx_desc = DEFAULT_RXD;
459         } else
460                 adapter->num_rx_desc = ixgbe_rxd;
461
462         /* Allocate our TX/RX Queues */
463         if (ixgbe_allocate_queues(adapter)) {
464                 error = ENOMEM;
465                 goto err_out;
466         }
467
468         /* Allocate multicast array memory. */
469         adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
470             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
471         if (adapter->mta == NULL) {
472                 device_printf(dev, "Can not allocate multicast setup array\n");
473                 error = ENOMEM;
474                 goto err_late;
475         }
476
477         /* Initialize the shared code */
478         hw->allow_unsupported_sfp = allow_unsupported_sfp;
479         error = ixgbe_init_shared_code(hw);
480         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
481                 /*
482                 ** No optics in this port; set up
483                 ** so the timer routine will probe 
484                 ** for later insertion.
485                 */
486                 adapter->sfp_probe = TRUE;
487                 error = 0;
488         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
489                 device_printf(dev,"Unsupported SFP+ module detected!\n");
490                 error = EIO;
491                 goto err_late;
492         } else if (error) {
493                 device_printf(dev,"Unable to initialize the shared code\n");
494                 error = EIO;
495                 goto err_late;
496         }
497
498         /* Make sure we have a good EEPROM before we read from it */
499         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
500                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
501                 error = EIO;
502                 goto err_late;
503         }
504
505         error = ixgbe_init_hw(hw);
506         switch (error) {
507         case IXGBE_ERR_EEPROM_VERSION:
508                 device_printf(dev, "This device is a pre-production adapter/"
509                     "LOM.  Please be aware there may be issues associated "
510                     "with your hardware.\n If you are experiencing problems "
511                     "please contact your Intel or hardware representative "
512                     "who provided you with this hardware.\n");
513                 break;
514         case IXGBE_ERR_SFP_NOT_SUPPORTED:
515                 device_printf(dev,"Unsupported SFP+ Module\n");
516                 error = EIO;
517                 goto err_late;
518         case IXGBE_ERR_SFP_NOT_PRESENT:
519                 device_printf(dev,"No SFP+ Module found\n");
520                 /* falls thru */
521         default:
522                 break;
523         }
524
525         /* Detect and set physical type */
526         ixgbe_setup_optics(adapter);
527
528         if ((adapter->msix > 1) && (ixgbe_enable_msix))
529                 error = ixgbe_allocate_msix(adapter); 
530         else
531                 error = ixgbe_allocate_legacy(adapter); 
532         if (error) 
533                 goto err_late;
534
535         /* Setup OS specific network interface */
536         if (ixgbe_setup_interface(dev, adapter) != 0)
537                 goto err_late;
538
539         /* Initialize statistics */
540         ixgbe_update_stats_counters(adapter);
541
542         /* Register for VLAN events */
543         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
544             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
545         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
546             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
547
548         /* Check PCIE slot type/speed/width */
549         ixgbe_get_slot_info(hw);
550
551
552         /* Set an initial default flow control value */
553         adapter->fc = ixgbe_fc_full;
554
555         /* Check for certain supported features */
556         ixgbe_check_wol_support(adapter);
557         ixgbe_check_eee_support(adapter);
558
559         /* Add sysctls */
560         ixgbe_add_device_sysctls(adapter);
561         ixgbe_add_hw_stats(adapter);
562
563         /* let hardware know driver is loaded */
564         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
565         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
566         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
567
568 #ifdef DEV_NETMAP
569         ixgbe_netmap_attach(adapter);
570 #endif /* DEV_NETMAP */
571         INIT_DEBUGOUT("ixgbe_attach: end");
572         return (0);
573
574 err_late:
575         ixgbe_free_transmit_structures(adapter);
576         ixgbe_free_receive_structures(adapter);
577 err_out:
578         if (adapter->ifp != NULL)
579                 if_free(adapter->ifp);
580         ixgbe_free_pci_resources(adapter);
581         free(adapter->mta, M_DEVBUF);
582         return (error);
583 }
584
585 /*********************************************************************
586  *  Device removal routine
587  *
588  *  The detach entry point is called when the driver is being removed.
589  *  This routine stops the adapter and deallocates all the resources
590  *  that were allocated for driver operation.
591  *
592  *  return 0 on success, positive on failure
593  *********************************************************************/
594
595 static int
596 ixgbe_detach(device_t dev)
597 {
598         struct adapter *adapter = device_get_softc(dev);
599         struct ix_queue *que = adapter->queues;
600         struct tx_ring *txr = adapter->tx_rings;
601         u32     ctrl_ext;
602
603         INIT_DEBUGOUT("ixgbe_detach: begin");
604
605         /* Make sure VLANS are not using driver */
606         if (adapter->ifp->if_vlantrunk != NULL) {
607                 device_printf(dev,"Vlan in use, detach first\n");
608                 return (EBUSY);
609         }
610
611         /* Stop the adapter */
612         IXGBE_CORE_LOCK(adapter);
613         ixgbe_setup_low_power_mode(adapter);
614         IXGBE_CORE_UNLOCK(adapter);
615
616         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
617                 if (que->tq) {
618 #ifndef IXGBE_LEGACY_TX
619                         taskqueue_drain(que->tq, &txr->txq_task);
620 #endif
621                         taskqueue_drain(que->tq, &que->que_task);
622                         taskqueue_free(que->tq);
623                 }
624         }
625
626         /* Drain the Link queue */
627         if (adapter->tq) {
628                 taskqueue_drain(adapter->tq, &adapter->link_task);
629                 taskqueue_drain(adapter->tq, &adapter->mod_task);
630                 taskqueue_drain(adapter->tq, &adapter->msf_task);
631                 taskqueue_drain(adapter->tq, &adapter->phy_task);
632 #ifdef IXGBE_FDIR
633                 taskqueue_drain(adapter->tq, &adapter->fdir_task);
634 #endif
635                 taskqueue_free(adapter->tq);
636         }
637
638         /* let hardware know driver is unloading */
639         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
640         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
641         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
642
643         /* Unregister VLAN events */
644         if (adapter->vlan_attach != NULL)
645                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
646         if (adapter->vlan_detach != NULL)
647                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
648
649         ether_ifdetach(adapter->ifp);
650         callout_drain(&adapter->timer);
651 #ifdef DEV_NETMAP
652         netmap_detach(adapter->ifp);
653 #endif /* DEV_NETMAP */
654         ixgbe_free_pci_resources(adapter);
655         bus_generic_detach(dev);
656         if_free(adapter->ifp);
657
658         ixgbe_free_transmit_structures(adapter);
659         ixgbe_free_receive_structures(adapter);
660         free(adapter->mta, M_DEVBUF);
661
662         IXGBE_CORE_LOCK_DESTROY(adapter);
663         return (0);
664 }
665
666 /*********************************************************************
667  *
668  *  Shutdown entry point
669  *
670  **********************************************************************/
671
672 static int
673 ixgbe_shutdown(device_t dev)
674 {
675         struct adapter *adapter = device_get_softc(dev);
676         int error = 0;
677
678         INIT_DEBUGOUT("ixgbe_shutdown: begin");
679
680         IXGBE_CORE_LOCK(adapter);
681         error = ixgbe_setup_low_power_mode(adapter);
682         IXGBE_CORE_UNLOCK(adapter);
683
684         return (error);
685 }
686
687 /**
688  * Methods for going from:
689  * D0 -> D3: ixgbe_suspend
690  * D3 -> D0: ixgbe_resume
691  */
692 static int
693 ixgbe_suspend(device_t dev)
694 {
695         struct adapter *adapter = device_get_softc(dev);
696         int error = 0;
697
698         INIT_DEBUGOUT("ixgbe_suspend: begin");
699
700         IXGBE_CORE_LOCK(adapter);
701
702         error = ixgbe_setup_low_power_mode(adapter);
703
704         /* Save state and power down */
705         pci_save_state(dev);
706         pci_set_powerstate(dev, PCI_POWERSTATE_D3);
707
708         IXGBE_CORE_UNLOCK(adapter);
709
710         return (error);
711 }
712
713 static int
714 ixgbe_resume(device_t dev)
715 {
716         struct adapter *adapter = device_get_softc(dev);
717         struct ifnet *ifp = adapter->ifp;
718         struct ixgbe_hw *hw = &adapter->hw;
719         u32 wus;
720
721         INIT_DEBUGOUT("ixgbe_resume: begin");
722
723         IXGBE_CORE_LOCK(adapter);
724
725         pci_set_powerstate(dev, PCI_POWERSTATE_D0);
726         pci_restore_state(dev);
727
728         /* Read & clear WUS register */
729         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
730         if (wus)
731                 device_printf(dev, "Woken up by (WUS): %#010x\n",
732                     IXGBE_READ_REG(hw, IXGBE_WUS));
733         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
734         /* And clear WUFC until next low-power transition */
735         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
736
737         /*
738          * Required after D3->D0 transition;
739          * will re-advertise all previous advertised speeds
740          */
741         if (ifp->if_flags & IFF_UP)
742                 ixgbe_init_locked(adapter);
743
744         IXGBE_CORE_UNLOCK(adapter);
745
746         INIT_DEBUGOUT("ixgbe_resume: end");
747         return (0);
748 }
749
750
751 /*********************************************************************
752  *  Ioctl entry point
753  *
754  *  ixgbe_ioctl is called when the user wants to configure the
755  *  interface.
756  *
757  *  return 0 on success, positive on failure
758  **********************************************************************/
759
760 static int
761 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
762 {
763         struct adapter  *adapter = ifp->if_softc;
764         struct ifreq    *ifr = (struct ifreq *) data;
765 #if defined(INET) || defined(INET6)
766         struct ifaddr *ifa = (struct ifaddr *)data;
767         bool            avoid_reset = FALSE;
768 #endif
769         int             error = 0;
770
771         switch (command) {
772
773         case SIOCSIFADDR:
774 #ifdef INET
775                 if (ifa->ifa_addr->sa_family == AF_INET)
776                         avoid_reset = TRUE;
777 #endif
778 #ifdef INET6
779                 if (ifa->ifa_addr->sa_family == AF_INET6)
780                         avoid_reset = TRUE;
781 #endif
782 #if defined(INET) || defined(INET6)
783                 /*
784                 ** Calling init results in link renegotiation,
785                 ** so we avoid doing it when possible.
786                 */
787                 if (avoid_reset) {
788                         ifp->if_flags |= IFF_UP;
789                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
790                                 ixgbe_init(adapter);
791                         if (!(ifp->if_flags & IFF_NOARP))
792                                 arp_ifinit(ifp, ifa);
793                 } else
794                         error = ether_ioctl(ifp, command, data);
795 #endif
796                 break;
797         case SIOCSIFMTU:
798                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
799                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
800                         error = EINVAL;
801                 } else {
802                         IXGBE_CORE_LOCK(adapter);
803                         ifp->if_mtu = ifr->ifr_mtu;
804                         adapter->max_frame_size =
805                                 ifp->if_mtu + IXGBE_MTU_HDR;
806                         ixgbe_init_locked(adapter);
807                         IXGBE_CORE_UNLOCK(adapter);
808                 }
809                 break;
810         case SIOCSIFFLAGS:
811                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
812                 IXGBE_CORE_LOCK(adapter);
813                 if (ifp->if_flags & IFF_UP) {
814                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
815                                 if ((ifp->if_flags ^ adapter->if_flags) &
816                                     (IFF_PROMISC | IFF_ALLMULTI)) {
817                                         ixgbe_set_promisc(adapter);
818                                 }
819                         } else
820                                 ixgbe_init_locked(adapter);
821                 } else
822                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
823                                 ixgbe_stop(adapter);
824                 adapter->if_flags = ifp->if_flags;
825                 IXGBE_CORE_UNLOCK(adapter);
826                 break;
827         case SIOCADDMULTI:
828         case SIOCDELMULTI:
829                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
830                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
831                         IXGBE_CORE_LOCK(adapter);
832                         ixgbe_disable_intr(adapter);
833                         ixgbe_set_multi(adapter);
834                         ixgbe_enable_intr(adapter);
835                         IXGBE_CORE_UNLOCK(adapter);
836                 }
837                 break;
838         case SIOCSIFMEDIA:
839         case SIOCGIFMEDIA:
840                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
841                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
842                 break;
843         case SIOCSIFCAP:
844         {
845                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
846                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
847                 if (mask & IFCAP_HWCSUM)
848                         ifp->if_capenable ^= IFCAP_HWCSUM;
849                 if (mask & IFCAP_TSO4)
850                         ifp->if_capenable ^= IFCAP_TSO4;
851                 if (mask & IFCAP_TSO6)
852                         ifp->if_capenable ^= IFCAP_TSO6;
853                 if (mask & IFCAP_LRO)
854                         ifp->if_capenable ^= IFCAP_LRO;
855                 if (mask & IFCAP_VLAN_HWTAGGING)
856                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
857                 if (mask & IFCAP_VLAN_HWFILTER)
858                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
859                 if (mask & IFCAP_VLAN_HWTSO)
860                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
861                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
862                         IXGBE_CORE_LOCK(adapter);
863                         ixgbe_init_locked(adapter);
864                         IXGBE_CORE_UNLOCK(adapter);
865                 }
866                 VLAN_CAPABILITIES(ifp);
867                 break;
868         }
869 #if __FreeBSD_version >= 1100036
870         case SIOCGI2C:
871         {
872                 struct ixgbe_hw *hw = &adapter->hw;
873                 struct ifi2creq i2c;
874                 int i;
875                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
876                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
877                 if (error != 0)
878                         break;
879                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
880                         error = EINVAL;
881                         break;
882                 }
883                 if (i2c.len > sizeof(i2c.data)) {
884                         error = EINVAL;
885                         break;
886                 }
887
888                 for (i = 0; i < i2c.len; i++)
889                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
890                             i2c.dev_addr, &i2c.data[i]);
891                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
892                 break;
893         }
894 #endif
895         default:
896                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
897                 error = ether_ioctl(ifp, command, data);
898                 break;
899         }
900
901         return (error);
902 }
903
904 /*********************************************************************
905  *  Init entry point
906  *
907  *  This routine is used in two ways. It is used by the stack as
908  *  init entry point in network interface structure. It is also used
909  *  by the driver as a hw/sw initialization routine to get to a
910  *  consistent state.
911  *
912  *  return 0 on success, positive on failure
913  **********************************************************************/
914 #define IXGBE_MHADD_MFS_SHIFT 16
915
916 static void
917 ixgbe_init_locked(struct adapter *adapter)
918 {
919         struct ifnet   *ifp = adapter->ifp;
920         device_t        dev = adapter->dev;
921         struct ixgbe_hw *hw = &adapter->hw;
922         u32             k, txdctl, mhadd, gpie;
923         u32             rxdctl, rxctrl;
924
925         mtx_assert(&adapter->core_mtx, MA_OWNED);
926         INIT_DEBUGOUT("ixgbe_init_locked: begin");
927         hw->adapter_stopped = FALSE;
928         ixgbe_stop_adapter(hw);
929         callout_stop(&adapter->timer);
930
931         /* reprogram the RAR[0] in case user changed it. */
932         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
933
934         /* Get the latest mac address, User can use a LAA */
935         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
936               IXGBE_ETH_LENGTH_OF_ADDRESS);
937         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
938         hw->addr_ctrl.rar_used_count = 1;
939
940         /* Set the various hardware offload abilities */
941         ifp->if_hwassist = 0;
942         if (ifp->if_capenable & IFCAP_TSO)
943                 ifp->if_hwassist |= CSUM_TSO;
944         if (ifp->if_capenable & IFCAP_TXCSUM) {
945                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
946 #if __FreeBSD_version >= 800000
947                 if (hw->mac.type != ixgbe_mac_82598EB)
948                         ifp->if_hwassist |= CSUM_SCTP;
949 #endif
950         }
951
952         /* Prepare transmit descriptors and buffers */
953         if (ixgbe_setup_transmit_structures(adapter)) {
954                 device_printf(dev, "Could not setup transmit structures\n");
955                 ixgbe_stop(adapter);
956                 return;
957         }
958
959         ixgbe_init_hw(hw);
960         ixgbe_initialize_transmit_units(adapter);
961
962         /* Setup Multicast table */
963         ixgbe_set_multi(adapter);
964
965         /*
966         ** Determine the correct mbuf pool
967         ** for doing jumbo frames
968         */
969         if (adapter->max_frame_size <= 2048)
970                 adapter->rx_mbuf_sz = MCLBYTES;
971         else if (adapter->max_frame_size <= 4096)
972                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
973         else if (adapter->max_frame_size <= 9216)
974                 adapter->rx_mbuf_sz = MJUM9BYTES;
975         else
976                 adapter->rx_mbuf_sz = MJUM16BYTES;
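        /*
        ** Example: with a 9000-byte MTU the max frame size (MTU plus
        ** Ethernet/VLAN header overhead) still fits under 9216, so 9K
        ** jumbo clusters (MJUM9BYTES) are selected (illustrative only).
        */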
977
978         /* Prepare receive descriptors and buffers */
979         if (ixgbe_setup_receive_structures(adapter)) {
980                 device_printf(dev, "Could not setup receive structures\n");
981                 ixgbe_stop(adapter);
982                 return;
983         }
984
985         /* Configure RX settings */
986         ixgbe_initialize_receive_units(adapter);
987
988         gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
989
990         /* Enable Fan Failure Interrupt */
991         gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
992
993         /* Add for Module detection */
994         if (hw->mac.type == ixgbe_mac_82599EB)
995                 gpie |= IXGBE_SDP2_GPIEN;
996
997         /*
998          * Thermal Failure Detection (X540)
999          * Link Detection (X552)
1000          */
1001         if (hw->mac.type == ixgbe_mac_X540 ||
1002             hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1003             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1004                 gpie |= IXGBE_SDP0_GPIEN_X540;
1005
1006         if (adapter->msix > 1) {
1007                 /* Enable Enhanced MSIX mode */
1008                 gpie |= IXGBE_GPIE_MSIX_MODE;
1009                 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1010                     IXGBE_GPIE_OCD;
1011         }
1012         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1013
1014         /* Set MTU size */
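        /*
         * MHADD carries the max frame size in its upper 16 bits
         * (IXGBE_MHADD_MFS_SHIFT == 16), which is what the mask and
         * shift below update.
         */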
1015         if (ifp->if_mtu > ETHERMTU) {
1016                 /* aka IXGBE_MAXFRS on 82599 and newer */
1017                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1018                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1019                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1020                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1021         }
1022         
1023         /* Now enable all the queues */
1024         for (int i = 0; i < adapter->num_queues; i++) {
1025                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1026                 txdctl |= IXGBE_TXDCTL_ENABLE;
1027                 /* Set WTHRESH to 8, burst writeback */
1028                 txdctl |= (8 << 16);
1029                 /*
1030                  * When the internal queue falls below PTHRESH (32),
1031                  * start prefetching as long as there are at least
1032                  * HTHRESH (1) buffers ready. The values are taken
1033                  * from the Intel linux driver 3.8.21.
1034                  * Prefetching enables tx line rate even with 1 queue.
1035                  */
1036                 txdctl |= (32 << 0) | (1 << 8);
1037                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1038         }
1039
1040         for (int i = 0; i < adapter->num_queues; i++) {
1041                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1042                 if (hw->mac.type == ixgbe_mac_82598EB) {
1043                         /*
1044                         ** PTHRESH = 21
1045                         ** HTHRESH = 4
1046                         ** WTHRESH = 8
1047                         */
1048                         rxdctl &= ~0x3FFFFF;
1049                         rxdctl |= 0x080420;
1050                 }
1051                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1052                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1053                 for (k = 0; k < 10; k++) {
1054                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1055                             IXGBE_RXDCTL_ENABLE)
1056                                 break;
1057                         else
1058                                 msec_delay(1);
1059                 }
1060                 wmb();
1061 #ifdef DEV_NETMAP
1062                 /*
1063                  * In netmap mode, we must preserve the buffers made
1064                  * available to userspace before the if_init()
1065                  * (this is true by default on the TX side, because
1066                  * init makes all buffers available to userspace).
1067                  *
1068                  * netmap_reset() and the device specific routines
1069                  * (e.g. ixgbe_setup_receive_rings()) map these
1070                  * buffers at the end of the NIC ring, so here we
1071                  * must set the RDT (tail) register to make sure
1072                  * they are not overwritten.
1073                  *
1074                  * In this driver the NIC ring starts at RDH = 0,
1075                  * RDT points to the last slot available for reception (?),
1076                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1077                  */
1078                 if (ifp->if_capenable & IFCAP_NETMAP) {
1079                         struct netmap_adapter *na = NA(adapter->ifp);
1080                         struct netmap_kring *kring = &na->rx_rings[i];
1081                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1082
1083                         IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1084                 } else
1085 #endif /* DEV_NETMAP */
1086                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1087         }
1088
1089         /* Enable Receive engine */
1090         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1091         if (hw->mac.type == ixgbe_mac_82598EB)
1092                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1093         rxctrl |= IXGBE_RXCTRL_RXEN;
1094         ixgbe_enable_rx_dma(hw, rxctrl);
1095
1096         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1097
1098         /* Set up MSI/X routing */
1099         if (ixgbe_enable_msix)  {
1100                 ixgbe_configure_ivars(adapter);
1101                 /* Set up auto-mask */
1102                 if (hw->mac.type == ixgbe_mac_82598EB)
1103                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1104                 else {
1105                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1106                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1107                 }
1108         } else {  /* Simple settings for Legacy/MSI */
1109                 ixgbe_set_ivar(adapter, 0, 0, 0);
1110                 ixgbe_set_ivar(adapter, 0, 0, 1);
1111                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1112         }
1113
1114 #ifdef IXGBE_FDIR
1115         /* Init Flow director */
1116         if (hw->mac.type != ixgbe_mac_82598EB) {
1117                 u32 hdrm = 32 << fdir_pballoc;
1118
1119                 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1120                 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1121         }
1122 #endif
1123
1124         /*
1125         ** Check on any SFP devices that
1126         ** need to be kick-started
1127         */
1128         if (hw->phy.type == ixgbe_phy_none) {
1129                 int err = hw->phy.ops.identify(hw);
1130                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1131                         device_printf(dev,
1132                             "Unsupported SFP+ module type was detected.\n");
1133                         return;
1134                 }
1135         }
1136
1137         /* Set moderation on the Link interrupt */
1138         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1139
1140         /* Configure Energy Efficient Ethernet for supported devices */
1141         if (adapter->eee_support)
1142                 ixgbe_setup_eee(hw, adapter->eee_enabled);
1143
1144         /* Config/Enable Link */
1145         ixgbe_config_link(adapter);
1146
1147         /* Hardware Packet Buffer & Flow Control setup */
1148         ixgbe_config_delay_values(adapter);
1149
1150         /* Initialize the FC settings */
1151         ixgbe_start_hw(hw);
1152
1153         /* Set up VLAN support and filter */
1154         ixgbe_setup_vlan_hw_support(adapter);
1155
1156         /* Setup DMA Coalescing */
1157         ixgbe_config_dmac(adapter);
1158
1159         /* And now turn on interrupts */
1160         ixgbe_enable_intr(adapter);
1161
1162         /* Now inform the stack we're ready */
1163         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1164
1165         return;
1166 }
1167
1168 static void
1169 ixgbe_init(void *arg)
1170 {
1171         struct adapter *adapter = arg;
1172
1173         IXGBE_CORE_LOCK(adapter);
1174         ixgbe_init_locked(adapter);
1175         IXGBE_CORE_UNLOCK(adapter);
1176         return;
1177 }
1178
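/*
** Compute the Rx packet buffer flow control thresholds: the high and low
** water marks (in KB) are derived from the max frame size and the size of
** Rx packet buffer 0, then handed to the shared flow control code along
** with the requested mode, pause time and XON setting.
*/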
1179 static void
1180 ixgbe_config_delay_values(struct adapter *adapter)
1181 {
1182         struct ixgbe_hw *hw = &adapter->hw;
1183         u32 rxpb, frame, size, tmp;
1184
1185         frame = adapter->max_frame_size;
1186
1187         /* Calculate High Water */
1188         switch (hw->mac.type) {
1189         case ixgbe_mac_X540:
1190         case ixgbe_mac_X550:
1191         case ixgbe_mac_X550EM_x:
1192                 tmp = IXGBE_DV_X540(frame, frame);
1193                 break;
1194         default:
1195                 tmp = IXGBE_DV(frame, frame);
1196                 break;
1197         }
1198         size = IXGBE_BT2KB(tmp);
1199         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1200         hw->fc.high_water[0] = rxpb - size;
1201
1202         /* Now calculate Low Water */
1203         switch (hw->mac.type) {
1204         case ixgbe_mac_X540:
1205         case ixgbe_mac_X550:
1206         case ixgbe_mac_X550EM_x:
1207                 tmp = IXGBE_LOW_DV_X540(frame);
1208                 break;
1209         default:
1210                 tmp = IXGBE_LOW_DV(frame);
1211                 break;
1212         }
1213         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1214
1215         hw->fc.requested_mode = adapter->fc;
1216         hw->fc.pause_time = IXGBE_FC_PAUSE;
1217         hw->fc.send_xon = TRUE;
1218 }
1219
1220 /*
1221 **
1222 ** MSIX Interrupt Handlers and Tasklets
1223 **
1224 */
1225
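/*
** Per-queue interrupt enable/disable helpers: on 82598 the queue bits live
** in the single EIMS/EIMC register, while on later MACs the (up to 64)
** vector bits are split across the EIMS_EX/EIMC_EX register pair, hence
** the separate low/high 32-bit masks below.
*/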
1226 static inline void
1227 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1228 {
1229         struct ixgbe_hw *hw = &adapter->hw;
1230         u64     queue = ((u64)1 << vector);
1231         u32     mask;
1232
1233         if (hw->mac.type == ixgbe_mac_82598EB) {
1234                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1236         } else {
1237                 mask = (queue & 0xFFFFFFFF);
1238                 if (mask)
1239                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240                 mask = (queue >> 32);
1241                 if (mask)
1242                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1243         }
1244 }
1245
1246 static inline void
1247 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1248 {
1249         struct ixgbe_hw *hw = &adapter->hw;
1250         u64     queue = ((u64)1 << vector);
1251         u32     mask;
1252
1253         if (hw->mac.type == ixgbe_mac_82598EB) {
1254                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1256         } else {
1257                 mask = (queue & 0xFFFFFFFF);
1258                 if (mask)
1259                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260                 mask = (queue >> 32);
1261                 if (mask)
1262                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1263         }
1264 }
1265
1266 static void
1267 ixgbe_handle_que(void *context, int pending)
1268 {
1269         struct ix_queue *que = context;
1270         struct adapter  *adapter = que->adapter;
1271         struct tx_ring  *txr = que->txr;
1272         struct ifnet    *ifp = adapter->ifp;
1273         bool            more;
1274
1275         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1276                 more = ixgbe_rxeof(que);
1277                 IXGBE_TX_LOCK(txr);
1278                 ixgbe_txeof(txr);
1279 #ifndef IXGBE_LEGACY_TX
1280                 if (!drbr_empty(ifp, txr->br))
1281                         ixgbe_mq_start_locked(ifp, txr);
1282 #else
1283                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1284                         ixgbe_start_locked(txr, ifp);
1285 #endif
1286                 IXGBE_TX_UNLOCK(txr);
1287         }
1288
1289         /* Reenable this interrupt */
1290         if (que->res != NULL)
1291                 ixgbe_enable_queue(adapter, que->msix);
1292         else
1293                 ixgbe_enable_intr(adapter);
1294         return;
1295 }
1296
1297
1298 /*********************************************************************
1299  *
1300  *  Legacy Interrupt Service routine
1301  *
1302  **********************************************************************/
1303
1304 static void
1305 ixgbe_legacy_irq(void *arg)
1306 {
1307         struct ix_queue *que = arg;
1308         struct adapter  *adapter = que->adapter;
1309         struct ixgbe_hw *hw = &adapter->hw;
1310         struct ifnet    *ifp = adapter->ifp;
1311         struct          tx_ring *txr = adapter->tx_rings;
1312         bool            more;
1313         u32             reg_eicr;
1314
1315
1316         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1317
1318         ++que->irqs;
1319         if (reg_eicr == 0) {
1320                 ixgbe_enable_intr(adapter);
1321                 return;
1322         }
1323
1324         more = ixgbe_rxeof(que);
1325
1326         IXGBE_TX_LOCK(txr);
1327         ixgbe_txeof(txr);
1328 #ifdef IXGBE_LEGACY_TX
1329         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1330                 ixgbe_start_locked(txr, ifp);
1331 #else
1332         if (!drbr_empty(ifp, txr->br))
1333                 ixgbe_mq_start_locked(ifp, txr);
1334 #endif
1335         IXGBE_TX_UNLOCK(txr);
1336
1337         /* Check for fan failure */
1338         if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1339             (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1340                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1341                     "REPLACE IMMEDIATELY!!\n");
1342                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1343         }
1344
1345         /* Link status change */
1346         if (reg_eicr & IXGBE_EICR_LSC)
1347                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1348
1349         /* External PHY interrupt */
1350         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1351             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1352                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1353
1354         if (more)
1355                 taskqueue_enqueue(que->tq, &que->que_task);
1356         else
1357                 ixgbe_enable_intr(adapter);
1358         return;
1359 }
1360
1361
1362 /*********************************************************************
1363  *
1364  *  MSIX Queue Interrupt Service routine
1365  *
1366  **********************************************************************/
1367 void
1368 ixgbe_msix_que(void *arg)
1369 {
1370         struct ix_queue *que = arg;
1371         struct adapter  *adapter = que->adapter;
1372         struct ifnet    *ifp = adapter->ifp;
1373         struct tx_ring  *txr = que->txr;
1374         struct rx_ring  *rxr = que->rxr;
1375         bool            more;
1376         u32             newitr = 0;
1377
1378         /* Protect against spurious interrupts */
1379         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1380                 return;
1381
1382         ixgbe_disable_queue(adapter, que->msix);
1383         ++que->irqs;
1384
1385         more = ixgbe_rxeof(que);
1386
1387         IXGBE_TX_LOCK(txr);
1388         ixgbe_txeof(txr);
1389 #ifdef IXGBE_LEGACY_TX
1390         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1391                 ixgbe_start_locked(txr, ifp);
1392 #else
1393         if (!drbr_empty(ifp, txr->br))
1394                 ixgbe_mq_start_locked(ifp, txr);
1395 #endif
1396         IXGBE_TX_UNLOCK(txr);
1397
1398         /* Do AIM now? */
1399
1400         if (ixgbe_enable_aim == FALSE)
1401                 goto no_calc;
1402         /*
1403         ** Do Adaptive Interrupt Moderation:
1404         **  - Write out last calculated setting
1405         **  - Calculate based on average size over
1406         **    the last interval.
1407         */
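        /*
        ** Illustrative example: for full-size frames the average works
        ** out to roughly 1500 bytes/packet; adding 24 for hardware frame
        ** overhead and CRC gives ~1524, which is above the 1200 mid-range
        ** cutoff below, so the value written back is about 1524 / 2.
        */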
1408         if (que->eitr_setting)
1409                 IXGBE_WRITE_REG(&adapter->hw,
1410                     IXGBE_EITR(que->msix), que->eitr_setting);
1411  
1412         que->eitr_setting = 0;
1413
1414         /* Idle, do nothing */
1415         if ((txr->bytes == 0) && (rxr->bytes == 0))
1416                 goto no_calc;
1417                                 
1418         if ((txr->bytes) && (txr->packets))
1419                 newitr = txr->bytes/txr->packets;
1420         if ((rxr->bytes) && (rxr->packets))
1421                 newitr = max(newitr,
1422                     (rxr->bytes / rxr->packets));
1423         newitr += 24; /* account for hardware frame, crc */
1424
1425         /* set an upper boundary */
1426         newitr = min(newitr, 3000);
1427
1428         /* Be nice to the mid range */
1429         if ((newitr > 300) && (newitr < 1200))
1430                 newitr = (newitr / 3);
1431         else
1432                 newitr = (newitr / 2);
1433
1434         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1435                 newitr |= newitr << 16;
1436         else
1437                 newitr |= IXGBE_EITR_CNT_WDIS;
1438                  
1439         /* save for next interrupt */
1440         que->eitr_setting = newitr;
1441
1442         /* Reset state */
1443         txr->bytes = 0;
1444         txr->packets = 0;
1445         rxr->bytes = 0;
1446         rxr->packets = 0;
1447
1448 no_calc:
1449         if (more)
1450                 taskqueue_enqueue(que->tq, &que->que_task);
1451         else
1452                 ixgbe_enable_queue(adapter, que->msix);
1453         return;
1454 }
1455
1456
1457 static void
1458 ixgbe_msix_link(void *arg)
1459 {
1460         struct adapter  *adapter = arg;
1461         struct ixgbe_hw *hw = &adapter->hw;
1462         u32             reg_eicr, mod_mask;
1463
1464         ++adapter->link_irq;
1465
1466         /* First get the cause */
1467         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1468         /* Be sure the queue bits are not cleared */
1469         reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1470         /* Clear interrupt with write */
1471         IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1472
1473         /* Link status change */
1474         if (reg_eicr & IXGBE_EICR_LSC)
1475                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1476
1477         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1478 #ifdef IXGBE_FDIR
1479                 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1480                         /* This is probably overkill :) */
1481                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1482                                 return;
1483                         /* Disable the interrupt */
1484                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1485                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1486                 } else
1487 #endif
1488                 if (reg_eicr & IXGBE_EICR_ECC) {
1489                         device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1490                             "Please Reboot!!\n");
1491                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1492                 }
1493
1494                 /* Check for over temp condition */
1495                 if (reg_eicr & IXGBE_EICR_TS) {
1496                         device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1497                             "PHY IS SHUT DOWN!!\n");
1498                         device_printf(adapter->dev, "System shutdown required!\n");
1499                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1500                 }
1501         }
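             /*
             ** The flow director, ECC and over-temp causes above are
             ** only checked on MACs newer than the 82598.
             */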
1502
1503         /* Pluggable optics-related interrupt */
1504         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1505                 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1506         else
1507                 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1508
1509         if (ixgbe_is_sfp(hw)) {
1510                 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1511                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1512                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1513                 } else if (reg_eicr & mod_mask) {
1514                         IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1515                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1516                 }
1517         }
1518
1519         /* Check for fan failure */
1520         if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1521             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1522                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1523                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1524                     "REPLACE IMMEDIATELY!!\n");
1525         }
1526
1527         /* External PHY interrupt */
1528         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1529             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1530                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1531                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1532         }
1533
1534         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1535         return;
1536 }
1537
1538 /*********************************************************************
1539  *
1540  *  Media Ioctl callback
1541  *
1542  *  This routine is called whenever the user queries the status of
1543  *  the interface using ifconfig.
1544  *
1545  **********************************************************************/
1546 static void
1547 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1548 {
1549         struct adapter *adapter = ifp->if_softc;
1550         struct ixgbe_hw *hw = &adapter->hw;
1551         int layer;
1552
1553         INIT_DEBUGOUT("ixgbe_media_status: begin");
1554         IXGBE_CORE_LOCK(adapter);
1555         ixgbe_update_link_status(adapter);
1556
1557         ifmr->ifm_status = IFM_AVALID;
1558         ifmr->ifm_active = IFM_ETHER;
1559
1560         if (!adapter->link_active) {
1561                 IXGBE_CORE_UNLOCK(adapter);
1562                 return;
1563         }
1564
1565         ifmr->ifm_status |= IFM_ACTIVE;
1566         layer = ixgbe_get_supported_physical_layer(hw);
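             /*
             ** Map the reported physical layer(s) and the current link
             ** speed onto an ifmedia subtype below.
             */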
1567
1568         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1569             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1570             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1571                 switch (adapter->link_speed) {
1572                 case IXGBE_LINK_SPEED_10GB_FULL:
1573                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1574                         break;
1575                 case IXGBE_LINK_SPEED_1GB_FULL:
1576                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1577                         break;
1578                 case IXGBE_LINK_SPEED_100_FULL:
1579                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1580                         break;
1581                 }
1582         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1583             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1584                 switch (adapter->link_speed) {
1585                 case IXGBE_LINK_SPEED_10GB_FULL:
1586                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1587                         break;
1588                 }
1589         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1590                 switch (adapter->link_speed) {
1591                 case IXGBE_LINK_SPEED_10GB_FULL:
1592                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1593                         break;
1594                 case IXGBE_LINK_SPEED_1GB_FULL:
1595                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1596                         break;
1597                 }
1598         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1599                 switch (adapter->link_speed) {
1600                 case IXGBE_LINK_SPEED_10GB_FULL:
1601                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1602                         break;
1603                 case IXGBE_LINK_SPEED_1GB_FULL:
1604                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1605                         break;
1606                 }
1607         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1608             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1609                 switch (adapter->link_speed) {
1610                 case IXGBE_LINK_SPEED_10GB_FULL:
1611                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1612                         break;
1613                 case IXGBE_LINK_SPEED_1GB_FULL:
1614                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1615                         break;
1616                 }
1617         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1618                 switch (adapter->link_speed) {
1619                 case IXGBE_LINK_SPEED_10GB_FULL:
1620                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1621                         break;
1622                 }
1623         /*
1624         ** XXX: These need to use the proper media types once
1625         ** they're added.
1626         */
1627         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1628                 switch (adapter->link_speed) {
1629                 case IXGBE_LINK_SPEED_10GB_FULL:
1630                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1631                         break;
1632                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1633                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1634                         break;
1635                 case IXGBE_LINK_SPEED_1GB_FULL:
1636                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1637                         break;
1638                 }
1639         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1640             || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1641                 switch (adapter->link_speed) {
1642                 case IXGBE_LINK_SPEED_10GB_FULL:
1643                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1644                         break;
1645                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1646                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1647                         break;
1648                 case IXGBE_LINK_SPEED_1GB_FULL:
1649                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1650                         break;
1651                 }
1652         
1653         /* If nothing is recognized... */
1654         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1655                 ifmr->ifm_active |= IFM_UNKNOWN;
1656         
1657 #if __FreeBSD_version >= 900025
1658         /* Display current flow control setting used on link */
1659         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1660             hw->fc.current_mode == ixgbe_fc_full)
1661                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1662         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1663             hw->fc.current_mode == ixgbe_fc_full)
1664                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1665 #endif
1666
1667         IXGBE_CORE_UNLOCK(adapter);
1668
1669         return;
1670 }
1671
1672 /*********************************************************************
1673  *
1674  *  Media Ioctl callback
1675  *
1676  *  This routine is called when the user changes speed/duplex using
1677  *  media/mediaopt options with ifconfig.
1678  *
1679  **********************************************************************/
1680 static int
1681 ixgbe_media_change(struct ifnet * ifp)
1682 {
1683         struct adapter *adapter = ifp->if_softc;
1684         struct ifmedia *ifm = &adapter->media;
1685         struct ixgbe_hw *hw = &adapter->hw;
1686         ixgbe_link_speed speed = 0;
1687
1688         INIT_DEBUGOUT("ixgbe_media_change: begin");
1689
1690         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1691                 return (EINVAL);
1692
1693         if (hw->phy.media_type == ixgbe_media_type_backplane)
1694                 return (EPERM);
1695
1696         /*
1697         ** We don't actually need to check against the supported
1698         ** media types of the adapter; ifmedia will take care of
1699         ** that for us.
1700         */
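             /*
             ** Note: the cases below fall through on purpose, so a 10G
             ** media type also accumulates the 1G (and, for BASE-T, the
             ** 100M) bits in 'speed' before setup_link() is called.
             */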
1701         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1702                 case IFM_AUTO:
1703                 case IFM_10G_T:
1704                         speed |= IXGBE_LINK_SPEED_100_FULL;
1705                 case IFM_10G_LRM:
1706                 case IFM_10G_SR: /* KR, too */
1707                 case IFM_10G_LR:
1708                 case IFM_10G_CX4: /* KX4 */
1709                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1710                 case IFM_10G_TWINAX:
1711                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
1712                         break;
1713                 case IFM_1000_T:
1714                         speed |= IXGBE_LINK_SPEED_100_FULL;
1715                 case IFM_1000_LX:
1716                 case IFM_1000_SX:
1717                 case IFM_1000_CX: /* KX */
1718                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1719                         break;
1720                 case IFM_100_TX:
1721                         speed |= IXGBE_LINK_SPEED_100_FULL;
1722                         break;
1723                 default:
1724                         goto invalid;
1725         }
1726
1727         hw->mac.autotry_restart = TRUE;
1728         hw->mac.ops.setup_link(hw, speed, TRUE);
1729         adapter->advertise =
1730                 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1731                 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1732                 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1733
1734         return (0);
1735
1736 invalid:
1737         device_printf(adapter->dev, "Invalid media type!\n");
1738         return (EINVAL);
1739 }
1740
1741 static void
1742 ixgbe_set_promisc(struct adapter *adapter)
1743 {
1744         u_int32_t       reg_rctl;
1745         struct ifnet   *ifp = adapter->ifp;
1746         int             mcnt = 0;
1747
1748         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1749         reg_rctl &= (~IXGBE_FCTRL_UPE);
1750         if (ifp->if_flags & IFF_ALLMULTI)
1751                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1752         else {
1753                 struct  ifmultiaddr *ifma;
1754 #if __FreeBSD_version < 800000
1755                 IF_ADDR_LOCK(ifp);
1756 #else
1757                 if_maddr_rlock(ifp);
1758 #endif
1759                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1760                         if (ifma->ifma_addr->sa_family != AF_LINK)
1761                                 continue;
1762                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1763                                 break;
1764                         mcnt++;
1765                 }
1766 #if __FreeBSD_version < 800000
1767                 IF_ADDR_UNLOCK(ifp);
1768 #else
1769                 if_maddr_runlock(ifp);
1770 #endif
1771         }
1772         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1773                 reg_rctl &= (~IXGBE_FCTRL_MPE);
1774         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1775
1776         if (ifp->if_flags & IFF_PROMISC) {
1777                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1778                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1779         } else if (ifp->if_flags & IFF_ALLMULTI) {
1780                 reg_rctl |= IXGBE_FCTRL_MPE;
1781                 reg_rctl &= ~IXGBE_FCTRL_UPE;
1782                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1783         }
1784         return;
1785 }
1786
1787
1788 /*********************************************************************
1789  *  Multicast Update
1790  *
1791  *  This routine is called whenever multicast address list is updated.
1792  *
1793  **********************************************************************/
1794 #define IXGBE_RAR_ENTRIES 16
1795
1796 static void
1797 ixgbe_set_multi(struct adapter *adapter)
1798 {
1799         u32     fctrl;
1800         u8      *mta;
1801         u8      *update_ptr;
1802         struct  ifmultiaddr *ifma;
1803         int     mcnt = 0;
1804         struct ifnet   *ifp = adapter->ifp;
1805
1806         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1807
1808         mta = adapter->mta;
1809         bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1810             MAX_NUM_MULTICAST_ADDRESSES);
1811
1812 #if __FreeBSD_version < 800000
1813         IF_ADDR_LOCK(ifp);
1814 #else
1815         if_maddr_rlock(ifp);
1816 #endif
1817         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1818                 if (ifma->ifma_addr->sa_family != AF_LINK)
1819                         continue;
1820                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1821                         break;
1822                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1823                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1824                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1825                 mcnt++;
1826         }
1827 #if __FreeBSD_version < 800000
1828         IF_ADDR_UNLOCK(ifp);
1829 #else
1830         if_maddr_runlock(ifp);
1831 #endif
1832
1833         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1834         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1835         if (ifp->if_flags & IFF_PROMISC)
1836                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1837         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1838             ifp->if_flags & IFF_ALLMULTI) {
1839                 fctrl |= IXGBE_FCTRL_MPE;
1840                 fctrl &= ~IXGBE_FCTRL_UPE;
1841         } else
1842                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843         
1844         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1845
1846         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1847                 update_ptr = mta;
1848                 ixgbe_update_mc_addr_list(&adapter->hw,
1849                     update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1850         }
1851
1852         return;
1853 }
1854
1855 /*
1856  * This is an iterator function required by the multicast
1857  * shared code. It feeds the shared code routine the addresses
1858  * collected in ixgbe_set_multi()'s array, one at a time.
1859  */
1860 static u8 *
1861 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1862 {
1863         u8 *addr = *update_ptr;
1864         u8 *newptr;
1865         *vmdq = 0;
1866
1867         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1868         *update_ptr = newptr;
1869         return addr;
1870 }
1871
1872
1873 /*********************************************************************
1874  *  Timer routine
1875  *
1876  *  This routine checks for link status, updates statistics,
1877  *  and runs the watchdog check.
1878  *
1879  **********************************************************************/
1880
1881 static void
1882 ixgbe_local_timer(void *arg)
1883 {
1884         struct adapter  *adapter = arg;
1885         device_t        dev = adapter->dev;
1886         struct ix_queue *que = adapter->queues;
1887         u64             queues = 0;
1888         int             hung = 0;
1889
1890         mtx_assert(&adapter->core_mtx, MA_OWNED);
1891
1892         /* Check for pluggable optics */
1893         if (adapter->sfp_probe)
1894                 if (!ixgbe_sfp_probe(adapter))
1895                         goto out; /* Nothing to do */
1896
1897         ixgbe_update_link_status(adapter);
1898         ixgbe_update_stats_counters(adapter);
1899
1900         /*
1901         ** Check the TX queues status
1902         **      - mark hung queues so we don't schedule on them
1903         **      - watchdog only if all queues show hung
1904         */          
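             /*
             ** 'queues' accumulates a bitmask of queues that still have
             ** TX work pending; it is handed to ixgbe_rearm_queues()
             ** below to force another interrupt on them.
             */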
1905         for (int i = 0; i < adapter->num_queues; i++, que++) {
1906                 /* Keep track of queues with work for soft irq */
1907                 if (que->txr->busy)
1908                         queues |= ((u64)1 << que->me);
1909                 /*
1910                 ** Each time txeof runs without cleaning while there
1911                 ** are uncleaned descriptors, it increments busy. If
1912                 ** that count reaches the MAX we declare the queue hung.
1913                 */
1914                 if (que->busy == IXGBE_QUEUE_HUNG) {
1915                         ++hung;
1916                         /* Mark the queue as inactive */
1917                         adapter->active_queues &= ~((u64)1 << que->me);
1918                         continue;
1919                 } else {
1920                         /* Check if we've come back from hung */
1921                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1922                                 adapter->active_queues |= ((u64)1 << que->me);
1923                 }
1924                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1925                         device_printf(dev,"Warning queue %d "
1926                             "appears to be hung!\n", i);
1927                         que->txr->busy = IXGBE_QUEUE_HUNG;
1928                         ++hung;
1929                 }
1930
1931         }
1932
1933         /* Only truly watchdog if all queues show hung */
1934         if (hung == adapter->num_queues)
1935                 goto watchdog;
1936         else if (queues != 0) { /* Force an IRQ on queues with work */
1937                 ixgbe_rearm_queues(adapter, queues);
1938         }
1939
1940 out:
1941         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1942         return;
1943
1944 watchdog:
1945         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1946         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1947         adapter->watchdog_events++;
1948         ixgbe_init_locked(adapter);
1949 }
1950
1951 /*
1952 ** Note: this routine updates the OS on the link state;
1953 **      the real check of the hardware only happens with
1954 **      a link interrupt.
1955 */
1956 static void
1957 ixgbe_update_link_status(struct adapter *adapter)
1958 {
1959         struct ifnet    *ifp = adapter->ifp;
1960         device_t dev = adapter->dev;
1961
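             /*
             ** The magic 128 below is IXGBE_LINK_SPEED_10GB_FULL (0x80);
             ** any other speed is simply reported as 1 Gbps.
             */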
1962         if (adapter->link_up){ 
1963                 if (adapter->link_active == FALSE) {
1964                         if (bootverbose)
1965                                 device_printf(dev,"Link is up %d Gbps %s \n",
1966                                     ((adapter->link_speed == 128)? 10:1),
1967                                     "Full Duplex");
1968                         adapter->link_active = TRUE;
1969                         /* Update any Flow Control changes */
1970                         ixgbe_fc_enable(&adapter->hw);
1971                         /* Update DMA coalescing config */
1972                         ixgbe_config_dmac(adapter);
1973                         if_link_state_change(ifp, LINK_STATE_UP);
1974                 }
1975         } else { /* Link down */
1976                 if (adapter->link_active == TRUE) {
1977                         if (bootverbose)
1978                                 device_printf(dev,"Link is Down\n");
1979                         if_link_state_change(ifp, LINK_STATE_DOWN);
1980                         adapter->link_active = FALSE;
1981                 }
1982         }
1983
1984         return;
1985 }
1986
1987
1988 /*********************************************************************
1989  *
1990  *  This routine disables all traffic on the adapter by issuing a
1991  *  global reset on the MAC and deallocates TX/RX buffers.
1992  *
1993  **********************************************************************/
1994
1995 static void
1996 ixgbe_stop(void *arg)
1997 {
1998         struct ifnet   *ifp;
1999         struct adapter *adapter = arg;
2000         struct ixgbe_hw *hw = &adapter->hw;
2001         ifp = adapter->ifp;
2002
2003         mtx_assert(&adapter->core_mtx, MA_OWNED);
2004
2005         INIT_DEBUGOUT("ixgbe_stop: begin\n");
2006         ixgbe_disable_intr(adapter);
2007         callout_stop(&adapter->timer);
2008
2009         /* Let the stack know...*/
2010         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2011
2012         ixgbe_reset_hw(hw);
2013         hw->adapter_stopped = FALSE;
2014         ixgbe_stop_adapter(hw);
2015         if (hw->mac.type == ixgbe_mac_82599EB)
2016                 ixgbe_stop_mac_link_on_d3_82599(hw);
2017         /* Turn off the laser - noop with no optics */
2018         ixgbe_disable_tx_laser(hw);
2019
2020         /* Update the stack */
2021         adapter->link_up = FALSE;
2022         ixgbe_update_link_status(adapter);
2023
2024         /* reprogram the RAR[0] in case user changed it. */
2025         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2026
2027         return;
2028 }
2029
2030
2031 /*********************************************************************
2032  *
2033  *  Determine hardware revision.
2034  *
2035  **********************************************************************/
2036 static void
2037 ixgbe_identify_hardware(struct adapter *adapter)
2038 {
2039         device_t        dev = adapter->dev;
2040         struct ixgbe_hw *hw = &adapter->hw;
2041
2042         /* Save off the information about this board */
2043         hw->vendor_id = pci_get_vendor(dev);
2044         hw->device_id = pci_get_device(dev);
2045         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2046         hw->subsystem_vendor_id =
2047             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2048         hw->subsystem_device_id =
2049             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2050
2051         /*
2052         ** Make sure BUSMASTER is set
2053         */
2054         pci_enable_busmaster(dev);
2055
2056         /* We need this here to set the num_segs below */
2057         ixgbe_set_mac_type(hw);
2058
2059         /* Pick up the 82599 settings */
2060         if (hw->mac.type != ixgbe_mac_82598EB) {
2061                 hw->phy.smart_speed = ixgbe_smart_speed;
2062                 adapter->num_segs = IXGBE_82599_SCATTER;
2063         } else
2064                 adapter->num_segs = IXGBE_82598_SCATTER;
2065
2066         return;
2067 }
2068
2069 /*********************************************************************
2070  *
2071  *  Determine optic type
2072  *
2073  **********************************************************************/
2074 static void
2075 ixgbe_setup_optics(struct adapter *adapter)
2076 {
2077         struct ixgbe_hw *hw = &adapter->hw;
2078         int             layer;
2079
2080         layer = ixgbe_get_supported_physical_layer(hw);
2081
2082         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2083                 adapter->optics = IFM_10G_T;
2084                 return;
2085         }
2086
2087         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2088                 adapter->optics = IFM_1000_T;
2089                 return;
2090         }
2091
2092         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2093                 adapter->optics = IFM_1000_SX;
2094                 return;
2095         }
2096
2097         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2098             IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2099                 adapter->optics = IFM_10G_LR;
2100                 return;
2101         }
2102
2103         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2104                 adapter->optics = IFM_10G_SR;
2105                 return;
2106         }
2107
2108         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2109                 adapter->optics = IFM_10G_TWINAX;
2110                 return;
2111         }
2112
2113         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2114             IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2115                 adapter->optics = IFM_10G_CX4;
2116                 return;
2117         }
2118
2119         /* If we get here just set the default */
2120         adapter->optics = IFM_ETHER | IFM_AUTO;
2121         return;
2122 }
2123
2124 /*********************************************************************
2125  *
2126  *  Setup the Legacy or MSI Interrupt handler
2127  *
2128  **********************************************************************/
2129 static int
2130 ixgbe_allocate_legacy(struct adapter *adapter)
2131 {
2132         device_t        dev = adapter->dev;
2133         struct          ix_queue *que = adapter->queues;
2134 #ifndef IXGBE_LEGACY_TX
2135         struct tx_ring          *txr = adapter->tx_rings;
2136 #endif
2137         int             error, rid = 0;
2138
2139         /* MSI RID at 1 */
2140         if (adapter->msix == 1)
2141                 rid = 1;
2142
2143         /* We allocate a single interrupt resource */
2144         adapter->res = bus_alloc_resource_any(dev,
2145             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2146         if (adapter->res == NULL) {
2147                 device_printf(dev, "Unable to allocate bus resource: "
2148                     "interrupt\n");
2149                 return (ENXIO);
2150         }
2151
2152         /*
2153          * Try allocating a fast interrupt and the associated deferred
2154          * processing contexts.
2155          */
2156 #ifndef IXGBE_LEGACY_TX
2157         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2158 #endif
2159         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2160         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2161             taskqueue_thread_enqueue, &que->tq);
2162         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2163             device_get_nameunit(adapter->dev));
2164
2165         /* Tasklets for Link, SFP and Multispeed Fiber */
2166         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2167         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2168         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2169         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2170 #ifdef IXGBE_FDIR
2171         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2172 #endif
2173         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2174             taskqueue_thread_enqueue, &adapter->tq);
2175         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2176             device_get_nameunit(adapter->dev));
2177
2178         if ((error = bus_setup_intr(dev, adapter->res,
2179             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2180             que, &adapter->tag)) != 0) {
2181                 device_printf(dev, "Failed to register fast interrupt "
2182                     "handler: %d\n", error);
2183                 taskqueue_free(que->tq);
2184                 taskqueue_free(adapter->tq);
2185                 que->tq = NULL;
2186                 adapter->tq = NULL;
2187                 return (error);
2188         }
2189         /* For simplicity in the handlers */
2190         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2191
2192         return (0);
2193 }
2194
2195
2196 /*********************************************************************
2197  *
2198  *  Setup MSIX Interrupt resources and handlers 
2199  *
2200  **********************************************************************/
2201 static int
2202 ixgbe_allocate_msix(struct adapter *adapter)
2203 {
2204         device_t        dev = adapter->dev;
2205         struct          ix_queue *que = adapter->queues;
2206         struct          tx_ring *txr = adapter->tx_rings;
2207         int             error, rid, vector = 0;
2208         int             cpu_id = 0;
2209
2210         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2211                 rid = vector + 1;
2212                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2213                     RF_SHAREABLE | RF_ACTIVE);
2214                 if (que->res == NULL) {
2215                         device_printf(dev,"Unable to allocate"
2216                             " bus resource: que interrupt [%d]\n", vector);
2217                         return (ENXIO);
2218                 }
2219                 /* Set the handler function */
2220                 error = bus_setup_intr(dev, que->res,
2221                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222                     ixgbe_msix_que, que, &que->tag);
2223                 if (error) {
2224                         que->res = NULL;
2225                         device_printf(dev, "Failed to register QUE handler");
2226                         return (error);
2227                 }
2228 #if __FreeBSD_version >= 800504
2229                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2230 #endif
2231                 que->msix = vector;
2232                 adapter->active_queues |= (u64)(1 << que->msix);
2233                 /*
2234                  * Bind the msix vector, and thus the
2235                  * rings to the corresponding cpu.
2236                  *
2237                  * This just happens to match the default RSS round-robin
2238                  * bucket -> queue -> CPU allocation.
2239                  */
2240                 if (adapter->num_queues > 1)
2241                         cpu_id = i;
2242
2243                 if (adapter->num_queues > 1)
2244                         bus_bind_intr(dev, que->res, cpu_id);
2245
2246 #ifndef IXGBE_LEGACY_TX
2247                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2248 #endif
2249                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2250                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2251                     taskqueue_thread_enqueue, &que->tq);
2252                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2253                     device_get_nameunit(adapter->dev));
2254         }
2255
2256         /* and Link */
2257         rid = vector + 1;
2258         adapter->res = bus_alloc_resource_any(dev,
2259             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2260         if (!adapter->res) {
2261                 device_printf(dev,"Unable to allocate"
2262             " bus resource: Link interrupt [%d]\n", rid);
2263                 return (ENXIO);
2264         }
2265         /* Set the link handler function */
2266         error = bus_setup_intr(dev, adapter->res,
2267             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2268             ixgbe_msix_link, adapter, &adapter->tag);
2269         if (error) {
2270                 adapter->res = NULL;
2271                 device_printf(dev, "Failed to register LINK handler");
2272                 return (error);
2273         }
2274 #if __FreeBSD_version >= 800504
2275         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2276 #endif
2277         adapter->vector = vector;
2278         /* Tasklets for Link, SFP and Multispeed Fiber */
2279         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2280         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2281         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2282         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2283 #ifdef IXGBE_FDIR
2284         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2285 #endif
2286         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2287             taskqueue_thread_enqueue, &adapter->tq);
2288         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2289             device_get_nameunit(adapter->dev));
2290
2291         return (0);
2292 }
2293
2294 /*
2295  * Setup Either MSI/X or MSI
2296  */
2297 static int
2298 ixgbe_setup_msix(struct adapter *adapter)
2299 {
2300         device_t dev = adapter->dev;
2301         int rid, want, queues, msgs;
2302
2303         /* Override by tuneable */
2304         if (ixgbe_enable_msix == 0)
2305                 goto msi;
2306
2307         /* First try MSI/X */
2308         msgs = pci_msix_count(dev); 
2309         if (msgs == 0)
2310                 goto msi;
2311         rid = PCIR_BAR(MSIX_82598_BAR);
2312         adapter->msix_mem = bus_alloc_resource_any(dev,
2313             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2314         if (adapter->msix_mem == NULL) {
2315                 rid += 4;       /* 82599 maps in higher BAR */
2316                 adapter->msix_mem = bus_alloc_resource_any(dev,
2317                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
2318         }
2319         if (adapter->msix_mem == NULL) {
2320                 /* May not be enabled */
2321                 device_printf(adapter->dev,
2322                     "Unable to map MSIX table \n");
2323                 goto msi;
2324         }
2325
2326         /* Figure out a reasonable auto config value */
2327         queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2328
2329         if (ixgbe_num_queues != 0)
2330                 queues = ixgbe_num_queues;
2331
2332         /* reflect correct sysctl value */
2333         ixgbe_num_queues = queues;
2334
2335         /*
2336         ** Want one vector (RX/TX pair) per queue
2337         ** plus an additional for Link.
2338         */
2339         want = queues + 1;
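             /*
             ** Worked example (hypothetical): with 8 CPUs, 10 MSIX
             ** messages available and no ixgbe_num_queues override,
             ** queues = 8 and want = 9, so nine vectors are requested
             ** (8 queue vectors plus the link vector).
             */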
2340         if (msgs >= want)
2341                 msgs = want;
2342         else {
2343                 device_printf(adapter->dev,
2344                     "MSIX Configuration Problem, "
2345                     "%d vectors but %d queues wanted!\n",
2346                     msgs, want);
2347                 goto msi;
2348         }
2349         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2350                 device_printf(adapter->dev,
2351                     "Using MSIX interrupts with %d vectors\n", msgs);
2352                 adapter->num_queues = queues;
2353                 return (msgs);
2354         }
2355         /*
2356         ** If MSIX alloc failed or provided us with
2357         ** less than needed, free and fall through to MSI
2358         */
2359         pci_release_msi(dev);
2360
2361 msi:
2362         if (adapter->msix_mem != NULL) {
2363                 bus_release_resource(dev, SYS_RES_MEMORY,
2364                     rid, adapter->msix_mem);
2365                 adapter->msix_mem = NULL;
2366         }
2367         msgs = 1;
2368         if (pci_alloc_msi(dev, &msgs) == 0) {
2369                 device_printf(adapter->dev,"Using an MSI interrupt\n");
2370                 return (msgs);
2371         }
2372         device_printf(adapter->dev,"Using a Legacy interrupt\n");
2373         return (0);
2374 }
2375
2376
2377 static int
2378 ixgbe_allocate_pci_resources(struct adapter *adapter)
2379 {
2380         int             rid;
2381         device_t        dev = adapter->dev;
2382
2383         rid = PCIR_BAR(0);
2384         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2385             &rid, RF_ACTIVE);
2386
2387         if (!(adapter->pci_mem)) {
2388                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2389                 return (ENXIO);
2390         }
2391
2392         adapter->osdep.mem_bus_space_tag =
2393                 rman_get_bustag(adapter->pci_mem);
2394         adapter->osdep.mem_bus_space_handle =
2395                 rman_get_bushandle(adapter->pci_mem);
2396         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2397
2398         /* Legacy defaults */
2399         adapter->num_queues = 1;
2400         adapter->hw.back = &adapter->osdep;
2401
2402         /*
2403         ** Now setup MSI or MSI/X, should
2404         ** return us the number of supported
2405         ** vectors. (Will be 1 for MSI)
2406         */
2407         adapter->msix = ixgbe_setup_msix(adapter);
2408         return (0);
2409 }
2410
2411 static void
2412 ixgbe_free_pci_resources(struct adapter * adapter)
2413 {
2414         struct          ix_queue *que = adapter->queues;
2415         device_t        dev = adapter->dev;
2416         int             rid, memrid;
2417
2418         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2419                 memrid = PCIR_BAR(MSIX_82598_BAR);
2420         else
2421                 memrid = PCIR_BAR(MSIX_82599_BAR);
2422
2423         /*
2424         ** There is a slight possibility of a failure mode
2425         ** in attach that results in entering this function
2426         ** before interrupt resources have been initialized, and
2427         ** in that case we do not want to execute the loops below.
2428         ** We can detect this reliably by the state of the adapter's
2429         ** res pointer.
2430         */
2431         if (adapter->res == NULL)
2432                 goto mem;
2433
2434         /*
2435         **  Release all msix queue resources:
2436         */
2437         for (int i = 0; i < adapter->num_queues; i++, que++) {
2438                 rid = que->msix + 1;
2439                 if (que->tag != NULL) {
2440                         bus_teardown_intr(dev, que->res, que->tag);
2441                         que->tag = NULL;
2442                 }
2443                 if (que->res != NULL)
2444                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2445         }
2446
2447
2448         /* Clean the Legacy or Link interrupt last */
2449         if (adapter->vector) /* we are doing MSIX */
2450                 rid = adapter->vector + 1;
2451         else
2452                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
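             /*
             ** This mirrors the allocation side: MSIX put the link vector
             ** at vector + 1, plain MSI used RID 1 and INTx RID 0.
             */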
2453
2454         if (adapter->tag != NULL) {
2455                 bus_teardown_intr(dev, adapter->res, adapter->tag);
2456                 adapter->tag = NULL;
2457         }
2458         if (adapter->res != NULL)
2459                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2460
2461 mem:
2462         if (adapter->msix)
2463                 pci_release_msi(dev);
2464
2465         if (adapter->msix_mem != NULL)
2466                 bus_release_resource(dev, SYS_RES_MEMORY,
2467                     memrid, adapter->msix_mem);
2468
2469         if (adapter->pci_mem != NULL)
2470                 bus_release_resource(dev, SYS_RES_MEMORY,
2471                     PCIR_BAR(0), adapter->pci_mem);
2472
2473         return;
2474 }
2475
2476 /*********************************************************************
2477  *
2478  *  Setup networking device structure and register an interface.
2479  *
2480  **********************************************************************/
2481 static int
2482 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2483 {
2484         struct ifnet   *ifp;
2485
2486         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2487
2488         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2489         if (ifp == NULL) {
2490                 device_printf(dev, "can not allocate ifnet structure\n");
2491                 return (-1);
2492         }
2493         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2494         ifp->if_baudrate = IF_Gbps(10);
2495         ifp->if_init = ixgbe_init;
2496         ifp->if_softc = adapter;
2497         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2498         ifp->if_ioctl = ixgbe_ioctl;
2499 #if __FreeBSD_version >= 1100036
2500         if_setgetcounterfn(ifp, ixgbe_get_counter);
2501 #endif
2502 #if __FreeBSD_version >= 1100045
2503         /* TSO parameters */
2504         ifp->if_hw_tsomax = 65518;
2505         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2506         ifp->if_hw_tsomaxsegsize = 2048;
2507 #endif
2508 #ifndef IXGBE_LEGACY_TX
2509         ifp->if_transmit = ixgbe_mq_start;
2510         ifp->if_qflush = ixgbe_qflush;
2511 #else
2512         ifp->if_start = ixgbe_start;
2513         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2514         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2515         IFQ_SET_READY(&ifp->if_snd);
2516 #endif
2517
2518         ether_ifattach(ifp, adapter->hw.mac.addr);
2519
2520         adapter->max_frame_size =
2521             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2522
2523         /*
2524          * Tell the upper layer(s) we support long frames.
2525          */
2526         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2527
2528         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2529         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2530         ifp->if_capabilities |= IFCAP_LRO;
2531         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2532                              |  IFCAP_VLAN_HWTSO
2533                              |  IFCAP_VLAN_MTU
2534                              |  IFCAP_HWSTATS;
2535         ifp->if_capenable = ifp->if_capabilities;
2536
2537         /*
2538         ** Don't turn this on by default: if vlans are
2539         ** created on another pseudo device (e.g. lagg),
2540         ** vlan events are not passed through, breaking
2541         ** operation, but with HW FILTER off it works. If
2542         ** you use vlans directly on the ixgbe driver you
2543         ** can enable this and get full hardware tag filtering.
2544         */
2545         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2546
2547         /*
2548          * Specify the media types supported by this adapter and register
2549          * callbacks to update media and link information
2550          */
2551         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2552                     ixgbe_media_status);
2553
2554         ixgbe_add_media_types(adapter);
2555
2556         /* Autoselect media by default */
2557         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2558
2559         return (0);
2560 }
2561
2562 static void
2563 ixgbe_add_media_types(struct adapter *adapter)
2564 {
2565         struct ixgbe_hw *hw = &adapter->hw;
2566         device_t dev = adapter->dev;
2567         int layer;
2568
2569         layer = ixgbe_get_supported_physical_layer(hw);
2570
2571         /* Media types with matching FreeBSD media defines */
2572         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2573                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2574         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2575                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2576         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2577                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2578         
2579         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2580             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2581                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2582
2583         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2584                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2585         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2586                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2587         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2588                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2589         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2590                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2591
2592         /*
2593         ** Other (no matching FreeBSD media type):
2594         ** To work around this, we'll map them to the closest
2595         ** (admittedly inappropriate) existing media types.
2596         */
2597         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2598                 device_printf(dev, "Media supported: 10GbaseKR\n");
2599                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2600                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2601         }
2602         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2603                 device_printf(dev, "Media supported: 10GbaseKX4\n");
2604                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2605                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2606         }
2607         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2608                 device_printf(dev, "Media supported: 1000baseKX\n");
2609                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2610                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2611         }
2612         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2613                 /* Someday, someone will care about you... */
2614                 device_printf(dev, "Media supported: 1000baseBX\n");
2615         }
2616         
2617         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2618                 ifmedia_add(&adapter->media,
2619                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2620                 ifmedia_add(&adapter->media,
2621                     IFM_ETHER | IFM_1000_T, 0, NULL);
2622         }
2623
2624         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2625 }
2626
2627 static void
2628 ixgbe_config_link(struct adapter *adapter)
2629 {
2630         struct ixgbe_hw *hw = &adapter->hw;
2631         u32     autoneg, err = 0;
2632         bool    sfp, negotiate;
2633
2634         sfp = ixgbe_is_sfp(hw);
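             /*
             ** SFP/SFP+ based devices are brought up via the mod/msf
             ** tasklets; everything else negotiates directly through
             ** the shared-code link setup below.
             */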
2635
2636         if (sfp) { 
2637                 if (hw->phy.multispeed_fiber) {
2638                         hw->mac.ops.setup_sfp(hw);
2639                         ixgbe_enable_tx_laser(hw);
2640                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2641                 } else
2642                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2643         } else {
2644                 if (hw->mac.ops.check_link)
2645                         err = ixgbe_check_link(hw, &adapter->link_speed,
2646                             &adapter->link_up, FALSE);
2647                 if (err)
2648                         goto out;
2649                 autoneg = hw->phy.autoneg_advertised;
2650                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2651                         err  = hw->mac.ops.get_link_capabilities(hw,
2652                             &autoneg, &negotiate);
2653                 if (err)
2654                         goto out;
2655                 if (hw->mac.ops.setup_link)
2656                         err = hw->mac.ops.setup_link(hw,
2657                             autoneg, adapter->link_up);
2658         }
2659 out:
2660         return;
2661 }
2662
2663
2664 /*********************************************************************
2665  *
2666  *  Enable transmit units.
2667  *
2668  **********************************************************************/
2669 static void
2670 ixgbe_initialize_transmit_units(struct adapter *adapter)
2671 {
2672         struct tx_ring  *txr = adapter->tx_rings;
2673         struct ixgbe_hw *hw = &adapter->hw;
2674
2675         /* Setup the Base and Length of the Tx Descriptor Ring */
2676
2677         for (int i = 0; i < adapter->num_queues; i++, txr++) {
2678                 u64     tdba = txr->txdma.dma_paddr;
2679                 u32     txctrl = 0;
2680
2681                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2682                        (tdba & 0x00000000ffffffffULL));
2683                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2684                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2685                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2686
2687                 /* Setup the HW Tx Head and Tail descriptor pointers */
2688                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2689                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2690
2691                 /* Cache the tail address */
2692                 txr->tail = IXGBE_TDT(txr->me);
2693
2694                 /* Set the processing limit */
2695                 txr->process_limit = ixgbe_tx_process_limit;
2696
2697                 /* Disable Head Writeback */
2698                 switch (hw->mac.type) {
2699                 case ixgbe_mac_82598EB:
2700                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2701                         break;
2702                 case ixgbe_mac_82599EB:
2703                 case ixgbe_mac_X540:
2704                 default:
2705                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2706                         break;
2707                 }
2708                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2709                 switch (hw->mac.type) {
2710                 case ixgbe_mac_82598EB:
2711                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2712                         break;
2713                 case ixgbe_mac_82599EB:
2714                 case ixgbe_mac_X540:
2715                 default:
2716                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2717                         break;
2718                 }
2719
2720         }
2721
2722         if (hw->mac.type != ixgbe_mac_82598EB) {
2723                 u32 dmatxctl, rttdcs;
2724                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2725                 dmatxctl |= IXGBE_DMATXCTL_TE;
2726                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2727                 /* Disable arbiter to set MTQC */
2728                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2729                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2730                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2731                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2732                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2733                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2734         }
2735
2736         return;
2737 }
2738
2739 static void
2740 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2741 {
2742         struct ixgbe_hw *hw = &adapter->hw;
2743         uint32_t reta;
2744         int i, j, queue_id, table_size;
2745         int index_mult;
2746         uint32_t rss_key[10];
2747         uint32_t mrqc;
2748
2749         /* Setup RSS */
2750         reta = 0;
2751
2752         /* set up random bits */
2753         arc4rand(&rss_key, sizeof(rss_key), 0);
2754
2755         /* Set multiplier for RETA setup and table size based on MAC */
2756         index_mult = 0x1;
2757         table_size = 128;
2758         switch (adapter->hw.mac.type) {
2759         case ixgbe_mac_82598EB:
2760                 index_mult = 0x11;
2761                 break;
2762         case ixgbe_mac_X550:
2763         case ixgbe_mac_X550EM_x:
2764                 table_size = 512;
2765                 break;
2766         default:
2767                 break;
2768         }
2769
2770         /* Set up the redirection table */
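             /*
             ** Each 32-bit RETA register packs four 8-bit queue indices,
             ** so the 128-entry table fills 32 RETA registers and the
             ** 512-entry table on X550 spills into ERETA.  With 4 queues
             ** the pattern written is 0,1,2,3 repeated (times index_mult).
             */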
2771         for (i = 0, j = 0; i < table_size; i++, j++) {
2772                 if (j == adapter->num_queues) j = 0;
2773                 queue_id = (j * index_mult);
2774                 /*
2775                  * The low 8 bits are for hash value (n+0);
2776                  * The next 8 bits are for hash value (n+1), etc.
2777                  */
2778                 reta = reta >> 8;
2779                 reta = reta | ( ((uint32_t) queue_id) << 24);
2780                 if ((i & 3) == 3) {
2781                         if (i < 128)
2782                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2783                         else
2784                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2785                         reta = 0;
2786                 }
2787         }
2788
2789         /* Now fill our hash function seeds */
2790         for (int i = 0; i < 10; i++)
2791                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2792
2793         /* Perform hash on these packet types */
2794         /*
2795          * Disable UDP - IP fragments aren't currently being handled
2796          * and so we end up with a mix of 2-tuple and 4-tuple
2797          * traffic.
2798          */
2799         mrqc = IXGBE_MRQC_RSSEN
2800              | IXGBE_MRQC_RSS_FIELD_IPV4
2801              | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2802 #if 0
2803              | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2804 #endif
2805              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2806              | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2807              | IXGBE_MRQC_RSS_FIELD_IPV6
2808              | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2809 #if 0
2810              | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2811              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2812 #endif
2813         ;
2814         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2815 }
2816
2817
2818 /*********************************************************************
2819  *
2820  *  Setup receive registers and features.
2821  *
2822  **********************************************************************/
2823 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2824
2825 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2826         
2827 static void
2828 ixgbe_initialize_receive_units(struct adapter *adapter)
2829 {
2830         struct  rx_ring *rxr = adapter->rx_rings;
2831         struct ixgbe_hw *hw = &adapter->hw;
2832         struct ifnet   *ifp = adapter->ifp;
2833         u32             bufsz, fctrl, srrctl, rxcsum;
2834         u32             hlreg;
2835
2836
2837         /*
2838          * Make sure receives are disabled while
2839          * setting up the descriptor ring
2840          */
2841         ixgbe_disable_rx(hw);
2842
2843         /* Enable broadcasts */
2844         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2845         fctrl |= IXGBE_FCTRL_BAM;
2846         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2847                 fctrl |= IXGBE_FCTRL_DPF;
2848                 fctrl |= IXGBE_FCTRL_PMCF;
2849         }
2850         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2851
2852         /* Set for Jumbo Frames? */
2853         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2854         if (ifp->if_mtu > ETHERMTU)
2855                 hlreg |= IXGBE_HLREG0_JUMBOEN;
2856         else
2857                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2858 #ifdef DEV_NETMAP
2859         /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2860         if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2861                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2862         else
2863                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2864 #endif /* DEV_NETMAP */
2865         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2866
2867         bufsz = (adapter->rx_mbuf_sz +
2868             BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
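        /*
         * For illustration: with the common 2 KB receive cluster
         * (rx_mbuf_sz == 2048, assuming the usual MCLBYTES-sized buffers)
         * and the 1 KB granularity implied by IXGBE_SRRCTL_BSIZEPKT_SHIFT,
         * bufsz becomes (2048 + 1023) >> 10 == 2, i.e. each SRRCTL below
         * advertises 2 KB packet buffers.
         */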
2869
2870         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2871                 u64 rdba = rxr->rxdma.dma_paddr;
2872
2873                 /* Setup the Base and Length of the Rx Descriptor Ring */
2874                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2875                                (rdba & 0x00000000ffffffffULL));
2876                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2877                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2878                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2879
2880                 /* Set up the SRRCTL register */
2881                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2882                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2883                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2884                 srrctl |= bufsz;
2885                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2886
2887                 /*
2888                  * Set DROP_EN iff we have no flow control and >1 queue.
2889                  * Note that srrctl was cleared shortly before during reset,
2890                  * so we do not need to clear the bit, but do it just in case
2891                  * this code is moved elsewhere.
2892                  */
2893                 if (adapter->num_queues > 1 &&
2894                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2895                         srrctl |= IXGBE_SRRCTL_DROP_EN;
2896                 } else {
2897                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2898                 }
2899
2900                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2901
2902                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2903                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2904                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2905
2906                 /* Set the processing limit */
2907                 rxr->process_limit = ixgbe_rx_process_limit;
2908
2909                 /* Set the driver rx tail address */
2910                 rxr->tail =  IXGBE_RDT(rxr->me);
2911         }
2912
2913         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2914                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2915                               IXGBE_PSRTYPE_UDPHDR |
2916                               IXGBE_PSRTYPE_IPV4HDR |
2917                               IXGBE_PSRTYPE_IPV6HDR;
2918                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2919         }
2920
2921         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2922
2923         ixgbe_initialise_rss_mapping(adapter);
2924
2925         if (adapter->num_queues > 1) {
2926                 /* RSS and RX IPP Checksum are mutually exclusive */
2927                 rxcsum |= IXGBE_RXCSUM_PCSD;
2928         }
2929
2930         if (ifp->if_capenable & IFCAP_RXCSUM)
2931                 rxcsum |= IXGBE_RXCSUM_PCSD;
2932
2933         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2934                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2935
2936         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2937
2938         return;
2939 }
2940
2941
2942 /*
2943 ** This routine is run via a vlan config EVENT;
2944 ** it enables us to use the HW filter table since
2945 ** we are given the vlan id. This just creates the
2946 ** entry in the soft copy of the VFTA; init will
2947 ** repopulate the real table.
2948 */
2949 static void
2950 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2951 {
2952         struct adapter  *adapter = ifp->if_softc;
2953         u16             index, bit;
2954
2955         if (ifp->if_softc !=  arg)   /* Not our event */
2956                 return;
2957
2958         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2959                 return;
2960
2961         IXGBE_CORE_LOCK(adapter);
2962         index = (vtag >> 5) & 0x7F;
2963         bit = vtag & 0x1F;
2964         adapter->shadow_vfta[index] |= (1 << bit);
2965         ++adapter->num_vlans;
2966         ixgbe_setup_vlan_hw_support(adapter);
2967         IXGBE_CORE_UNLOCK(adapter);
2968 }
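/*
** Worked example of the VFTA indexing above (illustration only):
** vtag 100 yields index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4,
** so bit 4 of shadow_vfta[3] is set and later written out to VFTA(3) by
** ixgbe_setup_vlan_hw_support().
*/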
2969
2970 /*
2971 ** This routine is run via a vlan
2972 ** unconfig EVENT; it removes our entry
2973 ** from the soft VFTA.
2974 */
2975 static void
2976 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2977 {
2978         struct adapter  *adapter = ifp->if_softc;
2979         u16             index, bit;
2980
2981         if (ifp->if_softc !=  arg)
2982                 return;
2983
2984         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2985                 return;
2986
2987         IXGBE_CORE_LOCK(adapter);
2988         index = (vtag >> 5) & 0x7F;
2989         bit = vtag & 0x1F;
2990         adapter->shadow_vfta[index] &= ~(1 << bit);
2991         --adapter->num_vlans;
2992         /* Re-init to load the changes */
2993         ixgbe_setup_vlan_hw_support(adapter);
2994         IXGBE_CORE_UNLOCK(adapter);
2995 }
2996
2997 static void
2998 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2999 {
3000         struct ifnet    *ifp = adapter->ifp;
3001         struct ixgbe_hw *hw = &adapter->hw;
3002         struct rx_ring  *rxr;
3003         u32             ctrl;
3004
3005
3006         /*
3007         ** We get here through init_locked, meaning a
3008         ** soft reset; that has already cleared the
3009         ** VFTA and other state, so if no vlans have
3010         ** been registered there is nothing to do.
3011         */
3012         if (adapter->num_vlans == 0)
3013                 return;
3014
3015         /* Setup the queues for vlans */
3016         for (int i = 0; i < adapter->num_queues; i++) {
3017                 rxr = &adapter->rx_rings[i];
3018                 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3019                 if (hw->mac.type != ixgbe_mac_82598EB) {
3020                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3021                         ctrl |= IXGBE_RXDCTL_VME;
3022                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3023                 }
3024                 rxr->vtag_strip = TRUE;
3025         }
3026
3027         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3028                 return;
3029         /*
3030         ** A soft reset zeros out the VFTA, so
3031         ** we need to repopulate it now.
3032         */
3033         for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3034                 if (adapter->shadow_vfta[i] != 0)
3035                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3036                             adapter->shadow_vfta[i]);
3037
3038         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3039         /* Enable the Filter Table if enabled */
3040         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3041                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3042                 ctrl |= IXGBE_VLNCTRL_VFE;
3043         }
3044         if (hw->mac.type == ixgbe_mac_82598EB)
3045                 ctrl |= IXGBE_VLNCTRL_VME;
3046         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3047 }
3048
3049 static void
3050 ixgbe_enable_intr(struct adapter *adapter)
3051 {
3052         struct ixgbe_hw *hw = &adapter->hw;
3053         struct ix_queue *que = adapter->queues;
3054         u32             mask, fwsm;
3055
3056         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3057         /* Enable Fan Failure detection */
3058         if (hw->device_id == IXGBE_DEV_ID_82598AT)
3059                     mask |= IXGBE_EIMS_GPI_SDP1;
3060
3061         switch (adapter->hw.mac.type) {
3062                 case ixgbe_mac_82599EB:
3063                         mask |= IXGBE_EIMS_ECC;
3064                         /* Temperature sensor on some adapters */
3065                         mask |= IXGBE_EIMS_GPI_SDP0;
3066                         /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3067                         mask |= IXGBE_EIMS_GPI_SDP1;
3068                         mask |= IXGBE_EIMS_GPI_SDP2;
3069 #ifdef IXGBE_FDIR
3070                         mask |= IXGBE_EIMS_FLOW_DIR;
3071 #endif
3072                         break;
3073                 case ixgbe_mac_X540:
3074                         /* Detect if Thermal Sensor is enabled */
3075                         fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3076                         if (fwsm & IXGBE_FWSM_TS_ENABLED)
3077                                 mask |= IXGBE_EIMS_TS;
3078                         mask |= IXGBE_EIMS_ECC;
3079 #ifdef IXGBE_FDIR
3080                         mask |= IXGBE_EIMS_FLOW_DIR;
3081 #endif
3082                         break;
3083                 case ixgbe_mac_X550:
3084                 case ixgbe_mac_X550EM_x:
3085                         /* MAC thermal sensor is automatically enabled */
3086                         mask |= IXGBE_EIMS_TS;
3087                         /* Some devices use SDP0 for important information */
3088                         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3089                             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3090                                 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3091                         mask |= IXGBE_EIMS_ECC;
3092 #ifdef IXGBE_FDIR
3093                         mask |= IXGBE_EIMS_FLOW_DIR;
3094 #endif
3095                 /* falls through */
3096                 default:
3097                         break;
3098         }
3099
3100         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3101
3102         /* With MSI-X we use auto clear */
3103         if (adapter->msix_mem) {
3104                 mask = IXGBE_EIMS_ENABLE_MASK;
3105                 /* Don't autoclear Link */
3106                 mask &= ~IXGBE_EIMS_OTHER;
3107                 mask &= ~IXGBE_EIMS_LSC;
3108                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3109         }
3110
3111         /*
3112         ** Now enable all queues; this is done separately to
3113         ** allow handling of the extended (beyond 32) MSI-X
3114         ** vectors that can be used by the 82599.
3115         */
3116         for (int i = 0; i < adapter->num_queues; i++, que++)
3117                 ixgbe_enable_queue(adapter, que->msix);
3118
3119         IXGBE_WRITE_FLUSH(hw);
3120
3121         return;
3122 }
3123
3124 static void
3125 ixgbe_disable_intr(struct adapter *adapter)
3126 {
3127         if (adapter->msix_mem)
3128                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3129         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3130                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3131         } else {
3132                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3133                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3134                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3135         }
3136         IXGBE_WRITE_FLUSH(&adapter->hw);
3137         return;
3138 }
3139
3140 /*
3141 ** Get the width and transaction speed of
3142 ** the slot this adapter is plugged into.
3143 */
3144 static void
3145 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3146 {
3147         device_t                dev = ((struct ixgbe_osdep *)hw->back)->dev;
3148         struct ixgbe_mac_info   *mac = &hw->mac;
3149         u16                     link;
3150         u32                     offset;
3151
3152         /* For most devices simply call the shared code routine */
3153         if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3154                 ixgbe_get_bus_info(hw);
3155                 /* These devices don't use PCI-E */
3156                 switch (hw->mac.type) {
3157                 case ixgbe_mac_X550EM_x:
3158                         return;
3159                 default:
3160                         goto display;
3161                 }
3162         }
3163
3164         /*
3165         ** For the Quad port adapter we need to parse back
3166         ** up the PCI tree to find the speed of the expansion
3167         ** slot into which this adapter is plugged. A bit more work.
3168         */
3169         dev = device_get_parent(device_get_parent(dev));
3170 #ifdef IXGBE_DEBUG
3171         device_printf(dev, "parent pcib = %x,%x,%x\n",
3172             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3173 #endif
3174         dev = device_get_parent(device_get_parent(dev));
3175 #ifdef IXGBE_DEBUG
3176         device_printf(dev, "slot pcib = %x,%x,%x\n",
3177             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3178 #endif
3179         /* Now get the PCI Express Capabilities offset */
3180         pci_find_cap(dev, PCIY_EXPRESS, &offset);
3181         /* ...and read the Link Status Register */
3182         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3183         switch (link & IXGBE_PCI_LINK_WIDTH) {
3184         case IXGBE_PCI_LINK_WIDTH_1:
3185                 hw->bus.width = ixgbe_bus_width_pcie_x1;
3186                 break;
3187         case IXGBE_PCI_LINK_WIDTH_2:
3188                 hw->bus.width = ixgbe_bus_width_pcie_x2;
3189                 break;
3190         case IXGBE_PCI_LINK_WIDTH_4:
3191                 hw->bus.width = ixgbe_bus_width_pcie_x4;
3192                 break;
3193         case IXGBE_PCI_LINK_WIDTH_8:
3194                 hw->bus.width = ixgbe_bus_width_pcie_x8;
3195                 break;
3196         default:
3197                 hw->bus.width = ixgbe_bus_width_unknown;
3198                 break;
3199         }
3200
3201         switch (link & IXGBE_PCI_LINK_SPEED) {
3202         case IXGBE_PCI_LINK_SPEED_2500:
3203                 hw->bus.speed = ixgbe_bus_speed_2500;
3204                 break;
3205         case IXGBE_PCI_LINK_SPEED_5000:
3206                 hw->bus.speed = ixgbe_bus_speed_5000;
3207                 break;
3208         case IXGBE_PCI_LINK_SPEED_8000:
3209                 hw->bus.speed = ixgbe_bus_speed_8000;
3210                 break;
3211         default:
3212                 hw->bus.speed = ixgbe_bus_speed_unknown;
3213                 break;
3214         }
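        /*
         * Example decode (illustration only): a card in a x8 PCIe Gen2
         * slot reports a x8 link width and 5.0GT/s in the Link Status
         * register, which the two switches above map to
         * hw->bus.width = ixgbe_bus_width_pcie_x8 and
         * hw->bus.speed = ixgbe_bus_speed_5000 for the checks below.
         */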
3215
3216         mac->ops.set_lan_id(hw);
3217
3218 display:
3219         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3220             ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3221             (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3222             (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3223             (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3224             (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3225             (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3226             ("Unknown"));
3227
3228         if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3229             ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3230             (hw->bus.speed == ixgbe_bus_speed_2500))) {
3231                 device_printf(dev, "PCI-Express bandwidth available"
3232                     " for this card\n     is not sufficient for"
3233                     " optimal performance.\n");
3234                 device_printf(dev, "For optimal performance a x8 "
3235                     "PCIE, or x4 PCIE Gen2 slot is required.\n");
3236         }
3237         if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3238             ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3239             (hw->bus.speed < ixgbe_bus_speed_8000))) {
3240                 device_printf(dev, "PCI-Express bandwidth available"
3241                     " for this card\n     is not sufficient for"
3242                     " optimal performance.\n");
3243                 device_printf(dev, "For optimal performance a x8 "
3244                     "PCIE Gen3 slot is required.\n");
3245         }
3246
3247         return;
3248 }
3249
3250
3251 /*
3252 ** Setup the correct IVAR register for a particular MSIX interrupt
3253 **   (yes this is all very magic and confusing :)
3254 **  - entry is the register array entry
3255 **  - vector is the MSIX vector for this queue
3256 **  - type is RX/TX/MISC
3257 */
3258 static void
3259 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3260 {
3261         struct ixgbe_hw *hw = &adapter->hw;
3262         u32 ivar, index;
3263
3264         vector |= IXGBE_IVAR_ALLOC_VAL;
3265
3266         switch (hw->mac.type) {
3267
3268         case ixgbe_mac_82598EB:
3269                 if (type == -1)
3270                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3271                 else
3272                         entry += (type * 64);
3273                 index = (entry >> 2) & 0x1F;
3274                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3275                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3276                 ivar |= (vector << (8 * (entry & 0x3)));
3277                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3278                 break;
3279
3280         case ixgbe_mac_82599EB:
3281         case ixgbe_mac_X540:
3282         case ixgbe_mac_X550:
3283         case ixgbe_mac_X550EM_x:
3284                 if (type == -1) { /* MISC IVAR */
3285                         index = (entry & 1) * 8;
3286                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3287                         ivar &= ~(0xFF << index);
3288                         ivar |= (vector << index);
3289                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3290                 } else {        /* RX/TX IVARS */
3291                         index = (16 * (entry & 1)) + (8 * type);
3292                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3293                         ivar &= ~(0xFF << index);
3294                         ivar |= (vector << index);
3295                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3296                 }
3297                 break;
3298         default:
3299                 break;
3300         }
3301 }
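/*
** IVAR placement example (illustration only): on the 82599-class MACs an
** RX entry 5 (type 0) lands in IVAR(5 >> 1) = IVAR(2) at bit offset
** 16*(5 & 1) + 8*0 = 16, while the matching TX entry (type 1) occupies
** bits 24-31 of the same register; the vector is written with
** IXGBE_IVAR_ALLOC_VAL set so the hardware treats the entry as valid.
*/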
3302
3303 static void
3304 ixgbe_configure_ivars(struct adapter *adapter)
3305 {
3306         struct  ix_queue *que = adapter->queues;
3307         u32 newitr;
3308
3309         if (ixgbe_max_interrupt_rate > 0)
3310                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3311         else {
3312                 /*
3313                 ** Disable DMA coalescing if interrupt moderation is
3314                 ** disabled.
3315                 */
3316                 adapter->dmac = 0;
3317                 newitr = 0;
3318         }
3319
3320         for (int i = 0; i < adapter->num_queues; i++, que++) {
3321                 /* First the RX queue entry */
3322                 ixgbe_set_ivar(adapter, i, que->msix, 0);
3323                 /* ... and the TX */
3324                 ixgbe_set_ivar(adapter, i, que->msix, 1);
3325                 /* Set an Initial EITR value */
3326                 IXGBE_WRITE_REG(&adapter->hw,
3327                     IXGBE_EITR(que->msix), newitr);
3328         }
3329
3330         /* For the Link interrupt */
3331         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3332 }
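/*
** EITR arithmetic example (illustration only): a requested
** ixgbe_max_interrupt_rate of 31250 interrupts/sec gives
** newitr = (4000000 / 31250) & 0x0FF8 = 0x80, and the interrupt_rate
** sysctl handler below recovers the same figure:
** ((0x80 & 0x0FF8) >> 3) = 16 and 500000 / 16 = 31250.
*/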
3333
3334 /*
3335 ** ixgbe_sfp_probe - called in the local timer to
3336 ** determine if a port had optics inserted.
3337 */  
3338 static bool ixgbe_sfp_probe(struct adapter *adapter)
3339 {
3340         struct ixgbe_hw *hw = &adapter->hw;
3341         device_t        dev = adapter->dev;
3342         bool            result = FALSE;
3343
3344         if ((hw->phy.type == ixgbe_phy_nl) &&
3345             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3346                 s32 ret = hw->phy.ops.identify_sfp(hw);
3347                 if (ret)
3348                         goto out;
3349                 ret = hw->phy.ops.reset(hw);
3350                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3351                         device_printf(dev,"Unsupported SFP+ module detected!");
3352                         printf(" Reload driver with supported module.\n");
3353                         adapter->sfp_probe = FALSE;
3354                         goto out;
3355                 } else
3356                         device_printf(dev,"SFP+ module detected!\n");
3357                 /* We now have supported optics */
3358                 adapter->sfp_probe = FALSE;
3359                 /* Set the optics type so system reports correctly */
3360                 ixgbe_setup_optics(adapter);
3361                 result = TRUE;
3362         }
3363 out:
3364         return (result);
3365 }
3366
3367 /*
3368 ** Tasklet handler for MSIX Link interrupts
3369 **  - done outside the interrupt handler since it might sleep
3370 */
3371 static void
3372 ixgbe_handle_link(void *context, int pending)
3373 {
3374         struct adapter  *adapter = context;
3375
3376         ixgbe_check_link(&adapter->hw,
3377             &adapter->link_speed, &adapter->link_up, 0);
3378         ixgbe_update_link_status(adapter);
3379 }
3380
3381 /*
3382 ** Tasklet for handling SFP module interrupts
3383 */
3384 static void
3385 ixgbe_handle_mod(void *context, int pending)
3386 {
3387         struct adapter  *adapter = context;
3388         struct ixgbe_hw *hw = &adapter->hw;
3389         device_t        dev = adapter->dev;
3390         u32 err;
3391
3392         err = hw->phy.ops.identify_sfp(hw);
3393         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3394                 device_printf(dev,
3395                     "Unsupported SFP+ module type was detected.\n");
3396                 return;
3397         }
3398         err = hw->mac.ops.setup_sfp(hw);
3399         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3400                 device_printf(dev,
3401                     "Setup failure - unsupported SFP+ module type.\n");
3402                 return;
3403         }
3404         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3405         return;
3406 }
3407
3408
3409 /*
3410 ** Tasklet for handling MSF (multispeed fiber) interrupts
3411 */
3412 static void
3413 ixgbe_handle_msf(void *context, int pending)
3414 {
3415         struct adapter  *adapter = context;
3416         struct ixgbe_hw *hw = &adapter->hw;
3417         u32 autoneg;
3418         bool negotiate;
3419         int err;
3420
3421         err = hw->phy.ops.identify_sfp(hw);
3422         if (!err) {
3423                 ixgbe_setup_optics(adapter);
3424                 INIT_DEBUGOUT1("ixgbe_handle_msf: flags: %X\n", adapter->optics);
3425         }
3426
3427         autoneg = hw->phy.autoneg_advertised;
3428         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3429                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3430         if (hw->mac.ops.setup_link)
3431                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3432
3433         ifmedia_removeall(&adapter->media);
3434         ixgbe_add_media_types(adapter);
3435         return;
3436 }
3437
3438 /*
3439 ** Tasklet for handling interrupts from an external PHY
3440 */
3441 static void
3442 ixgbe_handle_phy(void *context, int pending)
3443 {
3444         struct adapter  *adapter = context;
3445         struct ixgbe_hw *hw = &adapter->hw;
3446         int error;
3447
3448         error = hw->phy.ops.handle_lasi(hw);
3449         if (error == IXGBE_ERR_OVERTEMP)
3450                 device_printf(adapter->dev,
3451                     "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3452                     "PHY will downshift to lower power state!\n");
3453         else if (error)
3454                 device_printf(adapter->dev,
3455                     "Error handling LASI interrupt: %d\n",
3456                     error);
3457         return;
3458 }
3459
3460 #ifdef IXGBE_FDIR
3461 /*
3462 ** Tasklet for reinitializing the Flow Director filter table
3463 */
3464 static void
3465 ixgbe_reinit_fdir(void *context, int pending)
3466 {
3467         struct adapter  *adapter = context;
3468         struct ifnet   *ifp = adapter->ifp;
3469
3470         if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3471                 return;
3472         ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3473         adapter->fdir_reinit = 0;
3474         /* re-enable flow director interrupts */
3475         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3476         /* Restart the interface */
3477         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3478         return;
3479 }
3480 #endif
3481
3482 /*********************************************************************
3483  *
3484  *  Configure DMA Coalescing
3485  *
3486  **********************************************************************/
3487 static void
3488 ixgbe_config_dmac(struct adapter *adapter)
3489 {
3490         struct ixgbe_hw *hw = &adapter->hw;
3491         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3492
3493         if (hw->mac.type < ixgbe_mac_X550 ||
3494             !hw->mac.ops.dmac_config)
3495                 return;
3496
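        /*
         * XOR is used as "differs" here: reconfigure only when the
         * watchdog timer or the link speed has changed.
         */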
3497         if (dcfg->watchdog_timer ^ adapter->dmac ||
3498             dcfg->link_speed ^ adapter->link_speed) {
3499                 dcfg->watchdog_timer = adapter->dmac;
3500                 dcfg->fcoe_en = false;
3501                 dcfg->link_speed = adapter->link_speed;
3502                 dcfg->num_tcs = 1;
3503                 
3504                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3505                     dcfg->watchdog_timer, dcfg->link_speed);
3506
3507                 hw->mac.ops.dmac_config(hw);
3508         }
3509 }
3510
3511 /*
3512  * Checks whether the adapter supports Energy Efficient Ethernet
3513  * or not, based on device ID.
3514  */
3515 static void
3516 ixgbe_check_eee_support(struct adapter *adapter)
3517 {
3518         struct ixgbe_hw *hw = &adapter->hw;
3519
3520         adapter->eee_support = adapter->eee_enabled =
3521             (hw->device_id == IXGBE_DEV_ID_X550T ||
3522                 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3523 }
3524
3525 /*
3526  * Checks whether the adapter's ports are capable of
3527  * Wake On LAN by reading the adapter's NVM.
3528  *
3529  * Sets each port's hw->wol_enabled value depending
3530  * on the value read here.
3531  */
3532 static void
3533 ixgbe_check_wol_support(struct adapter *adapter)
3534 {
3535         struct ixgbe_hw *hw = &adapter->hw;
3536         u16 dev_caps = 0;
3537
3538         /* Find out WoL support for port */
3539         adapter->wol_support = hw->wol_enabled = 0;
3540         ixgbe_get_device_caps(hw, &dev_caps);
3541         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3542             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3543                 hw->bus.func == 0))
3544             adapter->wol_support = hw->wol_enabled = 1;
3545
3546         /* Save initial wake up filter configuration */
3547         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3548
3549         return;
3550 }
3551
3552 /*
3553  * Prepare the adapter/port for LPLU and/or WoL
3554  */
3555 static int
3556 ixgbe_setup_low_power_mode(struct adapter *adapter)
3557 {
3558         struct ixgbe_hw *hw = &adapter->hw;
3559         device_t dev = adapter->dev;
3560         s32 error = 0;
3561
3562         mtx_assert(&adapter->core_mtx, MA_OWNED);
3563
3564         /* Limit power management flow to X550EM baseT */
3565         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3566             && hw->phy.ops.enter_lplu) {
3567                 /* Turn off support for APM wakeup. (Using ACPI instead) */
3568                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3569                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3570
3571                 /*
3572                  * Clear Wake Up Status register to prevent any previous wakeup
3573                  * events from waking us up immediately after we suspend.
3574                  */
3575                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3576
3577                 /*
3578                  * Program the Wakeup Filter Control register with user filter
3579                  * settings
3580                  */
3581                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3582
3583                 /* Enable wakeups and power management in Wakeup Control */
3584                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3585                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3586
3587                 /* X550EM baseT adapters need a special LPLU flow */
3588                 hw->phy.reset_disable = true;
3589                 ixgbe_stop(adapter);
3590                 error = hw->phy.ops.enter_lplu(hw);
3591                 if (error)
3592                         device_printf(dev,
3593                             "Error entering LPLU: %d\n", error);
3594                 hw->phy.reset_disable = false;
3595         } else {
3596                 /* Just stop for other adapters */
3597                 ixgbe_stop(adapter);
3598         }
3599
3600         return error;
3601 }
3602
3603 /**********************************************************************
3604  *
3605  *  Update the board statistics counters.
3606  *
3607  **********************************************************************/
3608 static void
3609 ixgbe_update_stats_counters(struct adapter *adapter)
3610 {
3611         struct ixgbe_hw *hw = &adapter->hw;
3612         u32 missed_rx = 0, bprc, lxon, lxoff, total;
3613         u64 total_missed_rx = 0;
3614
3615         adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3616         adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3617         adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3618         adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3619
3620         for (int i = 0; i < 16; i++) {
3621                 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3622                 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3623                 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3624         }
3625         adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3626         adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3627         adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3628
3629         /* Hardware workaround, gprc counts missed packets */
3630         adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3631         adapter->stats.pf.gprc -= missed_rx;
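        /*
         * NB: nothing in this function adds to missed_rx, so the
         * adjustment above and the IQDROPS total set at the end of the
         * function currently remain zero.
         */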
3632
3633         if (hw->mac.type != ixgbe_mac_82598EB) {
3634                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3635                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3636                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3637                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3638                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3639                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3640                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3641                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3642         } else {
3643                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3644                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3645                 /* 82598 only has a counter in the high register */
3646                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3647                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3648                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3649         }
3650
3651         /*
3652          * Workaround: mprc hardware is incorrectly counting
3653          * broadcasts, so for now we subtract those.
3654          */
3655         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3656         adapter->stats.pf.bprc += bprc;
3657         adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3658         if (hw->mac.type == ixgbe_mac_82598EB)
3659                 adapter->stats.pf.mprc -= bprc;
3660
3661         adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3662         adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3663         adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3664         adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3665         adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3666         adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3667
3668         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3669         adapter->stats.pf.lxontxc += lxon;
3670         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3671         adapter->stats.pf.lxofftxc += lxoff;
3672         total = lxon + lxoff;
3673
3674         adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3675         adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3676         adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3677         adapter->stats.pf.gptc -= total;
3678         adapter->stats.pf.mptc -= total;
3679         adapter->stats.pf.ptc64 -= total;
3680         adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3681
3682         adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3683         adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3684         adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3685         adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3686         adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3687         adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3688         adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3689         adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3690         adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3691         adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3692         adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3693         adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3694         adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3695         adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3696         adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3697         adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3698         adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3699         adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3700         /* Only read FCOE on 82599 */
3701         if (hw->mac.type != ixgbe_mac_82598EB) {
3702                 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3703                 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3704                 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3705                 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3706                 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3707         }
3708
3709         /* Fill out the OS statistics structure */
3710         IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3711         IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3712         IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3713         IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3714         IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3715         IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3716         IXGBE_SET_COLLISIONS(adapter, 0);
3717         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3718         IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3719             + adapter->stats.pf.rlec);
3720 }
3721
3722 #if __FreeBSD_version >= 1100036
3723 static uint64_t
3724 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3725 {
3726         struct adapter *adapter;
3727         struct tx_ring *txr;
3728         uint64_t rv;
3729
3730         adapter = if_getsoftc(ifp);
3731
3732         switch (cnt) {
3733         case IFCOUNTER_IPACKETS:
3734                 return (adapter->ipackets);
3735         case IFCOUNTER_OPACKETS:
3736                 return (adapter->opackets);
3737         case IFCOUNTER_IBYTES:
3738                 return (adapter->ibytes);
3739         case IFCOUNTER_OBYTES:
3740                 return (adapter->obytes);
3741         case IFCOUNTER_IMCASTS:
3742                 return (adapter->imcasts);
3743         case IFCOUNTER_OMCASTS:
3744                 return (adapter->omcasts);
3745         case IFCOUNTER_COLLISIONS:
3746                 return (0);
3747         case IFCOUNTER_IQDROPS:
3748                 return (adapter->iqdrops);
3749         case IFCOUNTER_OQDROPS:
3750                 rv = 0;
3751                 txr = adapter->tx_rings;
3752                 for (int i = 0; i < adapter->num_queues; i++, txr++)
3753                         rv += txr->br->br_drops;
3754                 return (rv);
3755         case IFCOUNTER_IERRORS:
3756                 return (adapter->ierrors);
3757         default:
3758                 return (if_get_counter_default(ifp, cnt));
3759         }
3760 }
3761 #endif
3762
3763 /** ixgbe_sysctl_tdh_handler - Handler function
3764  *  Retrieves the TDH value from the hardware
3765  */
3766 static int 
3767 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3768 {
3769         int error;
3770
3771         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3772         if (!txr) return 0;
3773
3774         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3775         error = sysctl_handle_int(oidp, &val, 0, req);
3776         if (error || !req->newptr)
3777                 return error;
3778         return 0;
3779 }
3780
3781 /** ixgbe_sysctl_tdt_handler - Handler function
3782  *  Retrieves the TDT value from the hardware
3783  */
3784 static int 
3785 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3786 {
3787         int error;
3788
3789         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3790         if (!txr) return 0;
3791
3792         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3793         error = sysctl_handle_int(oidp, &val, 0, req);
3794         if (error || !req->newptr)
3795                 return error;
3796         return 0;
3797 }
3798
3799 /** ixgbe_sysctl_rdh_handler - Handler function
3800  *  Retrieves the RDH value from the hardware
3801  */
3802 static int 
3803 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3804 {
3805         int error;
3806
3807         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3808         if (!rxr) return 0;
3809
3810         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3811         error = sysctl_handle_int(oidp, &val, 0, req);
3812         if (error || !req->newptr)
3813                 return error;
3814         return 0;
3815 }
3816
3817 /** ixgbe_sysctl_rdt_handler - Handler function
3818  *  Retrieves the RDT value from the hardware
3819  */
3820 static int 
3821 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3822 {
3823         int error;
3824
3825         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3826         if (!rxr) return 0;
3827
3828         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3829         error = sysctl_handle_int(oidp, &val, 0, req);
3830         if (error || !req->newptr)
3831                 return error;
3832         return 0;
3833 }
3834
3835 static int
3836 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3837 {
3838         int error;
3839         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3840         unsigned int reg, usec, rate;
3841
3842         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3843         usec = ((reg & 0x0FF8) >> 3);
3844         if (usec > 0)
3845                 rate = 500000 / usec;
3846         else
3847                 rate = 0;
3848         error = sysctl_handle_int(oidp, &rate, 0, req);
3849         if (error || !req->newptr)
3850                 return error;
3851         reg &= ~0xfff; /* default, no limitation */
3852         ixgbe_max_interrupt_rate = 0;
3853         if (rate > 0 && rate < 500000) {
3854                 if (rate < 1000)
3855                         rate = 1000;
3856                 ixgbe_max_interrupt_rate = rate;
3857                 reg |= ((4000000/rate) & 0xff8 );
3858         }
3859         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3860         return 0;
3861 }
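/*
** Typical use of the handler above (the node path assumes the first
** ix(4) instance and queue; adjust unit and queue number as needed):
**
**   sysctl dev.ix.0.queue0.interrupt_rate           # report current rate
**   sysctl dev.ix.0.queue0.interrupt_rate=31250     # cap at ~31250 ints/s
**
** Writing 0 clears the EITR limit for that queue.
*/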
3862
3863 static void
3864 ixgbe_add_device_sysctls(struct adapter *adapter)
3865 {
3866         device_t dev = adapter->dev;
3867         struct ixgbe_hw *hw = &adapter->hw;
3868         struct sysctl_oid_list *child;
3869         struct sysctl_ctx_list *ctx;
3870
3871         ctx = device_get_sysctl_ctx(dev);
3872         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3873
3874         /* Sysctls for all devices */
3875         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3876                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3877                         ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3878
3879         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3880                         CTLFLAG_RW,
3881                         &ixgbe_enable_aim, 1, "Interrupt Moderation");
3882
3883         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3884                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3885                         ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3886
3887         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3888                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3889                         ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3890
3891         /* for X550 devices */
3892         if (hw->mac.type >= ixgbe_mac_X550)
3893                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3894                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3895                                 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3896
3897         /* for X550T and X550EM backplane devices */
3898         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3899             hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3900                 struct sysctl_oid *eee_node;
3901                 struct sysctl_oid_list *eee_list;
3902
3903                 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3904                                            CTLFLAG_RD, NULL,
3905                                            "Energy Efficient Ethernet sysctls");
3906                 eee_list = SYSCTL_CHILDREN(eee_node);
3907
3908                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3909                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3910                                 ixgbe_sysctl_eee_enable, "I",
3911                                 "Enable or Disable EEE");
3912
3913                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3914                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3915                                 ixgbe_sysctl_eee_negotiated, "I",
3916                                 "EEE negotiated on link");
3917
3918                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3919                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3920                                 ixgbe_sysctl_eee_tx_lpi_status, "I",
3921                                 "Whether or not TX link is in LPI state");
3922
3923                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3924                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3925                                 ixgbe_sysctl_eee_rx_lpi_status, "I",
3926                                 "Whether or not RX link is in LPI state");
3927         }
3928
3929         /* for certain 10GBaseT devices */
3930         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3931             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3932                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3933                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3934                                 ixgbe_sysctl_wol_enable, "I",
3935                                 "Enable/Disable Wake on LAN");
3936
3937                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3938                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3939                                 ixgbe_sysctl_wufc, "I",
3940                                 "Enable/Disable Wake Up Filters");
3941         }
3942
3943         /* for X550EM 10GBaseT devices */
3944         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3945                 struct sysctl_oid *phy_node;
3946                 struct sysctl_oid_list *phy_list;
3947
3948                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3949                                            CTLFLAG_RD, NULL,
3950                                            "External PHY sysctls");
3951                 phy_list = SYSCTL_CHILDREN(phy_node);
3952
3953                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3954                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3955                                 ixgbe_sysctl_phy_temp, "I",
3956                                 "Current External PHY Temperature (Celsius)");
3957
3958                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3959                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3960                                 ixgbe_sysctl_phy_overtemp_occurred, "I",
3961                                 "External PHY High Temperature Event Occurred");
3962         }
3963 }
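/*
** The nodes created above appear under the device's sysctl tree, e.g.
** dev.ix.0.fc, dev.ix.0.advertise_speed and, on X550 parts,
** dev.ix.0.dmac (the "ix.0" prefix assumes the first instance of the
** driver); the accepted value encodings are given by the
** IXGBE_SYSCTL_DESC_* description strings.
*/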
3964
3965 /*
3966  * Add sysctl variables, one per statistic, to the system.
3967  */
3968 static void
3969 ixgbe_add_hw_stats(struct adapter *adapter)
3970 {
3971         device_t dev = adapter->dev;
3972
3973         struct tx_ring *txr = adapter->tx_rings;
3974         struct rx_ring *rxr = adapter->rx_rings;
3975
3976         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3977         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3978         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3979         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3980
3981         struct sysctl_oid *stat_node, *queue_node;
3982         struct sysctl_oid_list *stat_list, *queue_list;
3983
3984 #define QUEUE_NAME_LEN 32
3985         char namebuf[QUEUE_NAME_LEN];
3986
3987         /* Driver Statistics */
3988         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3989                         CTLFLAG_RD, &adapter->dropped_pkts,
3990                         "Driver dropped packets");
3991         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3992                         CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3993                         "m_defrag() failed");
3994         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3995                         CTLFLAG_RD, &adapter->watchdog_events,
3996                         "Watchdog timeouts");
3997         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3998                         CTLFLAG_RD, &adapter->link_irq,
3999                         "Link MSIX IRQ Handled");
4000
4001         for (int i = 0; i < adapter->num_queues; i++, txr++) {
4002                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4003                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4004                                             CTLFLAG_RD, NULL, "Queue Name");
4005                 queue_list = SYSCTL_CHILDREN(queue_node);
4006
4007                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4008                                 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4009                                 sizeof(&adapter->queues[i]),
4010                                 ixgbe_sysctl_interrupt_rate_handler, "IU",
4011                                 "Interrupt Rate");
4012                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4013                                 CTLFLAG_RD, &(adapter->queues[i].irqs),
4014                                 "irqs on this queue");
4015                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
4016                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4017                                 ixgbe_sysctl_tdh_handler, "IU",
4018                                 "Transmit Descriptor Head");
4019                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
4020                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4021                                 ixgbe_sysctl_tdt_handler, "IU",
4022                                 "Transmit Descriptor Tail");
4023                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4024                                 CTLFLAG_RD, &txr->tso_tx,
4025                                 "TSO");
4026                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4027                                 CTLFLAG_RD, &txr->no_tx_dma_setup,
4028                                 "Driver tx dma failure in xmit");
4029                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4030                                 CTLFLAG_RD, &txr->no_desc_avail,
4031                                 "Queue No Descriptor Available");
4032                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4033                                 CTLFLAG_RD, &txr->total_packets,
4034                                 "Queue Packets Transmitted");
4035                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4036                                 CTLFLAG_RD, &txr->br->br_drops,
4037                                 "Packets dropped in buf_ring");
4038         }
4039
4040         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4041                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4042                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
4043                                             CTLFLAG_RD, NULL, "Queue Name");
4044                 queue_list = SYSCTL_CHILDREN(queue_node);
4045
4046                 struct lro_ctrl *lro = &rxr->lro;
4052
4053                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
4054                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4055                                 ixgbe_sysctl_rdh_handler, "IU",
4056                                 "Receive Descriptor Head");
4057                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
4058                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4059                                 ixgbe_sysctl_rdt_handler, "IU",
4060                                 "Receive Descriptor Tail");
4061                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4062                                 CTLFLAG_RD, &rxr->rx_packets,
4063                                 "Queue Packets Received");
4064                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4065                                 CTLFLAG_RD, &rxr->rx_bytes,
4066                                 "Queue Bytes Received");
4067                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4068                                 CTLFLAG_RD, &rxr->rx_copies,
4069                                 "Copied RX Frames");
4070                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4071                                 CTLFLAG_RD, &lro->lro_queued, 0,
4072                                 "LRO Queued");
4073                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4074                                 CTLFLAG_RD, &lro->lro_flushed, 0,
4075                                 "LRO Flushed");
4076         }
4077
4078         /* MAC stats get their own sub node */
4079
4080         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4081                                     CTLFLAG_RD, NULL, "MAC Statistics");
4082         stat_list = SYSCTL_CHILDREN(stat_node);
4083
4084         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4085                         CTLFLAG_RD, &stats->crcerrs,
4086                         "CRC Errors");
4087         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4088                         CTLFLAG_RD, &stats->illerrc,
4089                         "Illegal Byte Errors");
4090         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4091                         CTLFLAG_RD, &stats->errbc,
4092                         "Byte Errors");
4093         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4094                         CTLFLAG_RD, &stats->mspdc,
4095                         "MAC Short Packets Discarded");
4096         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4097                         CTLFLAG_RD, &stats->mlfc,
4098                         "MAC Local Faults");
4099         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4100                         CTLFLAG_RD, &stats->mrfc,
4101                         "MAC Remote Faults");
4102         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4103                         CTLFLAG_RD, &stats->rlec,
4104                         "Receive Length Errors");
4105
4106         /* Flow Control stats */
4107         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4108                         CTLFLAG_RD, &stats->lxontxc,
4109                         "Link XON Transmitted");
4110         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4111                         CTLFLAG_RD, &stats->lxonrxc,
4112                         "Link XON Received");
4113         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4114                         CTLFLAG_RD, &stats->lxofftxc,
4115                         "Link XOFF Transmitted");
4116         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4117                         CTLFLAG_RD, &stats->lxoffrxc,
4118                         "Link XOFF Received");
4119
4120         /* Packet Reception Stats */
4121         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4122                         CTLFLAG_RD, &stats->tor, 
4123                         "Total Octets Received"); 
4124         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4125                         CTLFLAG_RD, &stats->gorc, 
4126                         "Good Octets Received"); 
4127         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4128                         CTLFLAG_RD, &stats->tpr,
4129                         "Total Packets Received");
4130         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4131                         CTLFLAG_RD, &stats->gprc,
4132                         "Good Packets Received");
4133         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4134                         CTLFLAG_RD, &stats->mprc,
4135                         "Multicast Packets Received");
4136         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4137                         CTLFLAG_RD, &stats->bprc,
4138                         "Broadcast Packets Received");
4139         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4140                         CTLFLAG_RD, &stats->prc64,
4141                         "64 byte frames received");
4142         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4143                         CTLFLAG_RD, &stats->prc127,
4144                         "65-127 byte frames received");
4145         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4146                         CTLFLAG_RD, &stats->prc255,
4147                         "128-255 byte frames received");
4148         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4149                         CTLFLAG_RD, &stats->prc511,
4150                         "256-511 byte frames received");
4151         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4152                         CTLFLAG_RD, &stats->prc1023,
4153                         "512-1023 byte frames received");
4154         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4155                         CTLFLAG_RD, &stats->prc1522,
4156                         "1024-1522 byte frames received");
4157         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4158                         CTLFLAG_RD, &stats->ruc,
4159                         "Receive Undersized");
4160         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4161                         CTLFLAG_RD, &stats->rfc,
4162                         "Fragmented Packets Received");
4163         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4164                         CTLFLAG_RD, &stats->roc,
4165                         "Oversized Packets Received");
4166         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4167                         CTLFLAG_RD, &stats->rjc,
4168                         "Received Jabber");
4169         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4170                         CTLFLAG_RD, &stats->mngprc,
4171                         "Management Packets Received");
4172         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4173                         CTLFLAG_RD, &stats->mngpdc,
4174                         "Management Packets Dropped");
4175         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4176                         CTLFLAG_RD, &stats->xec,
4177                         "Checksum Errors");
4178
4179         /* Packet Transmission Stats */
4180         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4181                         CTLFLAG_RD, &stats->gotc, 
4182                         "Good Octets Transmitted"); 
4183         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4184                         CTLFLAG_RD, &stats->tpt,
4185                         "Total Packets Transmitted");
4186         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4187                         CTLFLAG_RD, &stats->gptc,
4188                         "Good Packets Transmitted");
4189         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4190                         CTLFLAG_RD, &stats->bptc,
4191                         "Broadcast Packets Transmitted");
4192         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4193                         CTLFLAG_RD, &stats->mptc,
4194                         "Multicast Packets Transmitted");
4195         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4196                         CTLFLAG_RD, &stats->mngptc,
4197                         "Management Packets Transmitted");
4198         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4199                         CTLFLAG_RD, &stats->ptc64,
4200                         "64 byte frames transmitted");
4201         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4202                         CTLFLAG_RD, &stats->ptc127,
4203                         "65-127 byte frames transmitted");
4204         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4205                         CTLFLAG_RD, &stats->ptc255,
4206                         "128-255 byte frames transmitted");
4207         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4208                         CTLFLAG_RD, &stats->ptc511,
4209                         "256-511 byte frames transmitted");
4210         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4211                         CTLFLAG_RD, &stats->ptc1023,
4212                         "512-1023 byte frames transmitted");
4213         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4214                         CTLFLAG_RD, &stats->ptc1522,
4215                         "1024-1522 byte frames transmitted");
4216 }
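4217
4218 /*
4219  * Illustrative sketch (not part of the driver): the MAC counters added
4220  * above are published under the device's sysctl tree, so a userland
4221  * program can read them with sysctlbyname(3).  The OID string below is
4222  * an assumption: it presumes unit 0 and the "mac_stats"/"crc_errs"
4223  * names registered above; adjust for the actual device.
4224  */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>

static int
ix_read_crc_errs(uint64_t *valp)
{
        size_t len = sizeof(*valp);

        /* Hypothetical OID path; "dev.ix.0" assumes the first ix(4) unit. */
        return (sysctlbyname("dev.ix.0.mac_stats.crc_errs", valp, &len,
            NULL, 0));
}
#endif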
4217
4218 /*
4219 ** Set flow control using sysctl:
4220 ** Flow control values:
4221 **      0 - off
4222 **      1 - rx pause
4223 **      2 - tx pause
4224 **      3 - full
4225 */
4226 static int
4227 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4228 {
4229         int error, last;
4230         struct adapter *adapter = (struct adapter *) arg1;
4231
4232         last = adapter->fc;
4233         error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4234         if ((error) || (req->newptr == NULL))
4235                 return (error);
4236
4237         /* Don't bother if it's not changed */
4238         if (adapter->fc == last)
4239                 return (0);
4240
4241         switch (adapter->fc) {
4242                 case ixgbe_fc_rx_pause:
4243                 case ixgbe_fc_tx_pause:
4244                 case ixgbe_fc_full:
4245                         adapter->hw.fc.requested_mode = adapter->fc;
4246                         if (adapter->num_queues > 1)
4247                                 ixgbe_disable_rx_drop(adapter);
4248                         break;
4249                 case ixgbe_fc_none:
4250                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
4251                         if (adapter->num_queues > 1)
4252                                 ixgbe_enable_rx_drop(adapter);
4253                         break;
4254                 default:
4255                         adapter->fc = last;
4256                         return (EINVAL);
4257         }
4258         /* Don't autoneg if forcing a value */
4259         adapter->hw.fc.disable_fc_autoneg = TRUE;
4260         ixgbe_fc_enable(&adapter->hw);
4261         return (error);
4262 }
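4263
4264 /*
4265  * Illustrative sketch (not part of the driver): changing the flow
4266  * control mode from userland through the handler above.  The OID name
4267  * "dev.ix.0.fc" is an assumption (unit 0 plus the node this handler is
4268  * attached to); the value 3 requests full rx+tx flow control as listed
4269  * in the comment above.
4270  */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
ix_set_fc_full(void)
{
        int fc = 3;     /* 0 = off, 1 = rx pause, 2 = tx pause, 3 = full */

        return (sysctlbyname("dev.ix.0.fc", NULL, NULL, &fc, sizeof(fc)));
}
#endif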
4263
4264 /*
4265 ** Control advertised link speed:
4266 **      Flags:
4267 **      0x1 - advertise 100 Mb
4268 **      0x2 - advertise 1G
4269 **      0x4 - advertise 10G
4270 */
4271 static int
4272 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4273 {
4274         int                     error = 0, requested;
4275         struct adapter          *adapter;
4276         device_t                dev;
4277         struct ixgbe_hw         *hw;
4278         ixgbe_link_speed        speed = 0;
4279
4280         adapter = (struct adapter *) arg1;
4281         dev = adapter->dev;
4282         hw = &adapter->hw;
4283
4284         requested = adapter->advertise;
4285         error = sysctl_handle_int(oidp, &requested, 0, req);
4286         if ((error) || (req->newptr == NULL))
4287                 return (error);
4288
4289         /* Checks to validate new value */
4290         if (adapter->advertise == requested) /* no change */
4291                 return (0);
4292
4293         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4294             (hw->phy.multispeed_fiber))) {
4295                 device_printf(dev,
4296                     "Advertised speed can only be set on copper or "
4297                     "multispeed fiber media types.\n");
4298                 return (EINVAL);
4299         }
4300
4301         if (requested < 0x1 || requested > 0x7) {
4302                 device_printf(dev,
4303                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4304                 return (EINVAL);
4305         }
4306
4307         if ((requested & 0x1)
4308             && (hw->mac.type != ixgbe_mac_X540)
4309             && (hw->mac.type != ixgbe_mac_X550)) {
4310                 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4311                 return (EINVAL);
4312         }
4313
4314         /* Set new value and report new advertised mode */
4315         if (requested & 0x1)
4316                 speed |= IXGBE_LINK_SPEED_100_FULL;
4317         if (requested & 0x2)
4318                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4319         if (requested & 0x4)
4320                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4321
4322         hw->mac.autotry_restart = TRUE;
4323         hw->mac.ops.setup_link(hw, speed, TRUE);
4324         adapter->advertise = requested;
4325
4326         return (error);
4327 }
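4328
4329 /*
4330  * Illustrative sketch (not part of the driver): the advertise value is
4331  * a bitmask, so 0x2 | 0x4 == 0x6 advertises both 1G and 10G.  The OID
4332  * name "dev.ix.0.advertise_speed" is an assumption (unit 0 plus the
4333  * node this handler is attached to).
4334  */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
ix_advertise_1g_10g(void)
{
        int adv = 0x2 | 0x4;    /* 1G + 10G; 100Mb (0x1) is X540/X550 only */

        return (sysctlbyname("dev.ix.0.advertise_speed", NULL, NULL,
            &adv, sizeof(adv)));
}
#endif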
4328
4329 /*
4330  * The following two sysctls are for X550 BaseT devices;
4331  * they deal with the external PHY used in them.
4332  */
4333 static int
4334 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4335 {
4336         struct adapter  *adapter = (struct adapter *) arg1;
4337         struct ixgbe_hw *hw = &adapter->hw;
4338         u16 reg;
4339
4340         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4341                 device_printf(adapter->dev,
4342                     "Device has no supported external thermal sensor.\n");
4343                 return (ENODEV);
4344         }
4345
4346         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4347                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4348                                       &reg)) {
4349                 device_printf(adapter->dev,
4350                     "Error reading from PHY's current temperature register\n");
4351                 return (EAGAIN);
4352         }
4353
4354         /* Shift temp for output */
4355         reg = reg >> 8;
4356
4357         return (sysctl_handle_int(oidp, NULL, reg, req));
4358 }
4359
4360 /*
4361  * Reports whether the current PHY temperature is over
4362  * the overtemp threshold.
4363  *  - This is reported directly from the PHY
4364  */
4365 static int
4366 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4367 {
4368         struct adapter  *adapter = (struct adapter *) arg1;
4369         struct ixgbe_hw *hw = &adapter->hw;
4370         u16 reg;
4371
4372         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4373                 device_printf(adapter->dev,
4374                     "Device has no supported external thermal sensor.\n");
4375                 return (ENODEV);
4376         }
4377
4378         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4379                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4380                                       &reg)) {
4381                 device_printf(adapter->dev,
4382                     "Error reading from PHY's temperature status register\n");
4383                 return (EAGAIN);
4384         }
4385
4386         /* Get occurrence bit */
4387         reg = !!(reg & 0x4000);
4388         return (sysctl_handle_int(oidp, 0, reg, req));
4389 }
4390
4391 /*
4392 ** Thermal Shutdown Trigger (internal MAC)
4393 **   - Set this to 1 to cause an overtemp event to occur
4394 */
4395 static int
4396 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4397 {
4398         struct adapter  *adapter = (struct adapter *) arg1;
4399         struct ixgbe_hw *hw = &adapter->hw;
4400         int error, fire = 0;
4401
4402         error = sysctl_handle_int(oidp, &fire, 0, req);
4403         if ((error) || (req->newptr == NULL))
4404                 return (error);
4405
4406         if (fire) {
4407                 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4408                 reg |= IXGBE_EICR_TS;
4409                 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4410         }
4411
4412         return (0);
4413 }
4414
4415 /*
4416 ** Manage DMA Coalescing.
4417 ** Control values:
4418 **      0/1 - off / on (1 selects the default timer value of 1000)
4419 **
4420 **      Legal timer values (in microseconds) are:
4421 **      50, 100, 250, 500, 1000, 2000, 5000, 10000
4422 **
4423 **      Turning off interrupt moderation will also turn this off.
4424 */
4425 static int
4426 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4427 {
4428         struct adapter *adapter = (struct adapter *) arg1;
4429         struct ixgbe_hw *hw = &adapter->hw;
4430         struct ifnet *ifp = adapter->ifp;
4431         int             error;
4432         u16             oldval;
4433
4434         oldval = adapter->dmac;
4435         error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4436         if ((error) || (req->newptr == NULL))
4437                 return (error);
4438
4439         switch (hw->mac.type) {
4440         case ixgbe_mac_X550:
4441         case ixgbe_mac_X550EM_x:
4442                 break;
4443         default:
4444                 device_printf(adapter->dev,
4445                     "DMA Coalescing is only supported on X550 devices\n");
4446                 return (ENODEV);
4447         }
4448
4449         switch (adapter->dmac) {
4450         case 0:
4451                 /* Disabled */
4452                 break;
4453         case 1: /* Enable and use default */
4454                 adapter->dmac = 1000;
4455                 break;
4456         case 50:
4457         case 100:
4458         case 250:
4459         case 500:
4460         case 1000:
4461         case 2000:
4462         case 5000:
4463         case 10000:
4464                 /* Legal values - allow */
4465                 break;
4466         default:
4467                 /* Do nothing, illegal value */
4468                 adapter->dmac = oldval;
4469                 return (EINVAL);
4470         }
4471
4472         /* Re-initialize hardware if it's already running */
4473         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4474                 ixgbe_init(adapter);
4475
4476         return (0);
4477 }
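4478
4479 /*
4480  * Illustrative sketch (not part of the driver): enabling DMA coalescing
4481  * with an explicit timer value.  The OID name "dev.ix.0.dmac" is an
4482  * assumption (unit 0 plus the node this handler is attached to); only
4483  * the timer values listed above are accepted, and writing 1 selects the
4484  * default of 1000.
4485  */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
ix_set_dmac(int timer)
{
        /* timer must be 0, 1, 50, 100, 250, 500, 1000, 2000, 5000 or 10000 */
        return (sysctlbyname("dev.ix.0.dmac", NULL, NULL,
            &timer, sizeof(timer)));
}
#endif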
4478
4479 /*
4480  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4481  * Values:
4482  *      0 - disabled
4483  *      1 - enabled
4484  */
4485 static int
4486 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4487 {
4488         struct adapter *adapter = (struct adapter *) arg1;
4489         struct ixgbe_hw *hw = &adapter->hw;
4490         int new_wol_enabled;
4491         int error = 0;
4492
4493         new_wol_enabled = hw->wol_enabled;
4494         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4495         if ((error) || (req->newptr == NULL))
4496                 return (error);
4497         if (new_wol_enabled == hw->wol_enabled)
4498                 return (0);
4499
4500         if (new_wol_enabled > 0 && !adapter->wol_support)
4501                 return (ENODEV);
4502         else
4503                 hw->wol_enabled = !!(new_wol_enabled);
4504
4505         return (0);
4506 }
4507
4508 /*
4509  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4510  * if supported by the adapter.
4511  * Values:
4512  *      0 - disabled
4513  *      1 - enabled
4514  */
4515 static int
4516 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4517 {
4518         struct adapter *adapter = (struct adapter *) arg1;
4519         struct ifnet *ifp = adapter->ifp;
4520         int new_eee_enabled, error = 0;
4521
4522         new_eee_enabled = adapter->eee_enabled;
4523         error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4524         if ((error) || (req->newptr == NULL))
4525                 return (error);
4526         if (new_eee_enabled == adapter->eee_enabled)
4527                 return (0);
4528
4529         if (new_eee_enabled > 0 && !adapter->eee_support)
4530                 return (ENODEV);
4531         else
4532                 adapter->eee_enabled = !!(new_eee_enabled);
4533
4534         /* Re-initialize hardware if it's already running */
4535         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4536                 ixgbe_init(adapter);
4537
4538         return (0);
4539 }
4540
4541 /*
4542  * Read-only sysctl indicating whether EEE support was negotiated
4543  * on the link.
4544  */
4545 static int
4546 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4547 {
4548         struct adapter *adapter = (struct adapter *) arg1;
4549         struct ixgbe_hw *hw = &adapter->hw;
4550         bool status;
4551
4552         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4553
4554         return (sysctl_handle_int(oidp, 0, status, req));
4555 }
4556
4557 /*
4558  * Read-only sysctl indicating whether RX Link is in LPI state.
4559  */
4560 static int
4561 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4562 {
4563         struct adapter *adapter = (struct adapter *) arg1;
4564         struct ixgbe_hw *hw = &adapter->hw;
4565         bool status;
4566
4567         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4568             IXGBE_EEE_RX_LPI_STATUS);
4569
4570         return (sysctl_handle_int(oidp, 0, status, req));
4571 }
4572
4573 /*
4574  * Read-only sysctl indicating whether TX Link is in LPI state.
4575  */
4576 static int
4577 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4578 {
4579         struct adapter *adapter = (struct adapter *) arg1;
4580         struct ixgbe_hw *hw = &adapter->hw;
4581         bool status;
4582
4583         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4584             IXGBE_EEE_TX_LPI_STATUS);
4585
4586         return (sysctl_handle_int(oidp, 0, status, req));
4587 }
4588
4589 /*
4590  * Sysctl to enable/disable the types of packets that the
4591  * adapter will wake up on upon receipt.
4592  * WUFC - Wake Up Filter Control
4593  * Flags:
4594  *      0x1  - Link Status Change
4595  *      0x2  - Magic Packet
4596  *      0x4  - Direct Exact
4597  *      0x8  - Directed Multicast
4598  *      0x10 - Broadcast
4599  *      0x20 - ARP/IPv4 Request Packet
4600  *      0x40 - Direct IPv4 Packet
4601  *      0x80 - Direct IPv6 Packet
4602  *
4603  * Setting any other flag bit will cause the sysctl to return
4604  * an error (EINVAL).
4605  */
4606 static int
4607 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4608 {
4609         struct adapter *adapter = (struct adapter *) arg1;
4610         int error = 0;
4611         u32 new_wufc;
4612
4613         new_wufc = adapter->wufc;
4614
4615         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4616         if ((error) || (req->newptr == NULL))
4617                 return (error);
4618         if (new_wufc == adapter->wufc)
4619                 return (0);
4620
4621         if (new_wufc & 0xffffff00)
4622                 return (EINVAL);
4623         else {
4624                 new_wufc &= 0xff;
4625                 new_wufc |= (0xffffff00 & adapter->wufc);
4626                 adapter->wufc = new_wufc;
4627         }
4628
4629         return (0);
4630 }
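4631
4632 /*
4633  * Illustrative sketch (not part of the driver): composing a WUFC value
4634  * from the flag bits listed above, e.g. waking on link status change
4635  * plus magic packet (0x1 | 0x2 == 0x3).  The OID name "dev.ix.0.wufc"
4636  * is an assumption (unit 0 plus the node this handler is attached to).
4637  */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
ix_wake_on_link_and_magic(void)
{
        int wufc = 0x1 | 0x2;   /* link status change | magic packet */

        return (sysctlbyname("dev.ix.0.wufc", NULL, NULL,
            &wufc, sizeof(wufc)));
}
#endif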
4631
4632 /*
4633 ** Enable the hardware to drop packets when the buffer is
4634 ** full. This is useful with multiple queues, so that no single
4635 ** queue being full stalls the entire RX engine. We only
4636 ** enable this when multiple queues are in use AND flow control is
4637 ** disabled.
4638 */
4639 static void
4640 ixgbe_enable_rx_drop(struct adapter *adapter)
4641 {
4642         struct ixgbe_hw *hw = &adapter->hw;
4643
4644         for (int i = 0; i < adapter->num_queues; i++) {
4645                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4646                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4647                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4648         }
4649 }
4650
4651 static void
4652 ixgbe_disable_rx_drop(struct adapter *adapter)
4653 {
4654         struct ixgbe_hw *hw = &adapter->hw;
4655
4656         for (int i = 0; i < adapter->num_queues; i++) {
4657                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4658                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4659                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4660         }
4661 }
4662
4663 static void
4664 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4665 {
4666         u32 mask;
4667
4668         switch (adapter->hw.mac.type) {
4669         case ixgbe_mac_82598EB:
4670                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4671                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4672                 break;
4673         case ixgbe_mac_82599EB:
4674         case ixgbe_mac_X540:
4675         case ixgbe_mac_X550:
4676         case ixgbe_mac_X550EM_x:
4677                 mask = (queues & 0xFFFFFFFF);
4678                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4679                 mask = (queues >> 32);
4680                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4681                 break;
4682         default:
4683                 break;
4684         }
4685 }
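4686
4687 /*
4688  * Worked example (illustrative): each set bit in the 64-bit "queues"
4689  * argument selects one queue interrupt to retrigger.  On 82599 and
4690  * newer MACs the low 32 bits are written to EICS_EX(0) and the high 32
4691  * bits to EICS_EX(1), so e.g. queues = (1ULL << 33) results in a write
4692  * of 0x2 to EICS_EX(1).  On 82598 only the bits covered by
4693  * IXGBE_EIMS_RTX_QUEUE take effect.
4694  */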
4686
4687