sys/dev/ixgbe/if_ix.c (FreeBSD stable/10)
Re-add if_hw_tso* assignments to if_ix.c that were removed in r283668.
1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #endif
40
41 #include "ixgbe.h"
42
43 /*********************************************************************
44  *  Set this to one to display debug statistics
45  *********************************************************************/
46 int             ixgbe_display_debug_stats = 0;
47
48 /*********************************************************************
49  *  Driver version
50  *********************************************************************/
51 char ixgbe_driver_version[] = "2.8.3";
52
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select devices to load on
57  *  Last field stores an index into ixgbe_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62
63 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
64 {
65         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
66         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
67         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
68         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
69         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
70         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
95         /* required last entry */
96         {0, 0, 0, 0, 0}
97 };
98
99 /*********************************************************************
100  *  Table of branding strings
101  *********************************************************************/
102
103 static char    *ixgbe_strings[] = {
104         "Intel(R) PRO/10GbE PCI-Express Network Driver"
105 };
106
107 /*********************************************************************
108  *  Function prototypes
109  *********************************************************************/
110 static int      ixgbe_probe(device_t);
111 static int      ixgbe_attach(device_t);
112 static int      ixgbe_detach(device_t);
113 static int      ixgbe_shutdown(device_t);
114 static int      ixgbe_suspend(device_t);
115 static int      ixgbe_resume(device_t);
116 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
117 static void     ixgbe_init(void *);
118 static void     ixgbe_init_locked(struct adapter *);
119 static void     ixgbe_stop(void *);
120 #if __FreeBSD_version >= 1100036
121 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
122 #endif
123 static void     ixgbe_add_media_types(struct adapter *);
124 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
125 static int      ixgbe_media_change(struct ifnet *);
126 static void     ixgbe_identify_hardware(struct adapter *);
127 static int      ixgbe_allocate_pci_resources(struct adapter *);
128 static void     ixgbe_get_slot_info(struct ixgbe_hw *);
129 static int      ixgbe_allocate_msix(struct adapter *);
130 static int      ixgbe_allocate_legacy(struct adapter *);
131 static int      ixgbe_setup_msix(struct adapter *);
132 static void     ixgbe_free_pci_resources(struct adapter *);
133 static void     ixgbe_local_timer(void *);
134 static int      ixgbe_setup_interface(device_t, struct adapter *);
135 static void     ixgbe_config_dmac(struct adapter *);
136 static void     ixgbe_config_delay_values(struct adapter *);
137 static void     ixgbe_config_link(struct adapter *);
138 static void     ixgbe_check_eee_support(struct adapter *);
139 static void     ixgbe_check_wol_support(struct adapter *);
140 static int      ixgbe_setup_low_power_mode(struct adapter *);
141 static void     ixgbe_rearm_queues(struct adapter *, u64);
142
143 static void     ixgbe_initialize_transmit_units(struct adapter *);
144 static void     ixgbe_initialize_receive_units(struct adapter *);
145 static void     ixgbe_enable_rx_drop(struct adapter *);
146 static void     ixgbe_disable_rx_drop(struct adapter *);
147
148 static void     ixgbe_enable_intr(struct adapter *);
149 static void     ixgbe_disable_intr(struct adapter *);
150 static void     ixgbe_update_stats_counters(struct adapter *);
151 static void     ixgbe_set_promisc(struct adapter *);
152 static void     ixgbe_set_multi(struct adapter *);
153 static void     ixgbe_update_link_status(struct adapter *);
154 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
155 static void     ixgbe_configure_ivars(struct adapter *);
156 static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
157
158 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
159 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
160 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
161
162 static void     ixgbe_add_device_sysctls(struct adapter *);
163 static void     ixgbe_add_hw_stats(struct adapter *);
164
165 /* Sysctl handlers */
166 static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
167 static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
168 static int      ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS);
169 static int      ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
170 static int      ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
171 static int      ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
172 static int      ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
173 static int      ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
174 static int      ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
175 static int      ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS);
176 static int      ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS);
177 static int      ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS);
178
179 /* Support for pluggable optic modules */
180 static bool     ixgbe_sfp_probe(struct adapter *);
181 static void     ixgbe_setup_optics(struct adapter *);
182
183 /* Legacy (single vector) interrupt handler */
184 static void     ixgbe_legacy_irq(void *);
185
186 /* The MSI/X Interrupt handlers */
187 static void     ixgbe_msix_que(void *);
188 static void     ixgbe_msix_link(void *);
189
190 /* Deferred interrupt tasklets */
191 static void     ixgbe_handle_que(void *, int);
192 static void     ixgbe_handle_link(void *, int);
193 static void     ixgbe_handle_msf(void *, int);
194 static void     ixgbe_handle_mod(void *, int);
195 static void     ixgbe_handle_phy(void *, int);
196
197 #ifdef IXGBE_FDIR
198 static void     ixgbe_reinit_fdir(void *, int);
199 #endif
200
201 /*********************************************************************
202  *  FreeBSD Device Interface Entry Points
203  *********************************************************************/
204
205 static device_method_t ix_methods[] = {
206         /* Device interface */
207         DEVMETHOD(device_probe, ixgbe_probe),
208         DEVMETHOD(device_attach, ixgbe_attach),
209         DEVMETHOD(device_detach, ixgbe_detach),
210         DEVMETHOD(device_shutdown, ixgbe_shutdown),
211         DEVMETHOD(device_suspend, ixgbe_suspend),
212         DEVMETHOD(device_resume, ixgbe_resume),
213         DEVMETHOD_END
214 };
215
216 static driver_t ix_driver = {
217         "ix", ix_methods, sizeof(struct adapter),
218 };
219
220 devclass_t ix_devclass;
221 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
222
223 MODULE_DEPEND(ix, pci, 1, 1, 1);
224 MODULE_DEPEND(ix, ether, 1, 1, 1);
225
226 /*
227 ** TUNEABLE PARAMETERS:
228 */
229
230 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
231                    "IXGBE driver parameters");
232
233 /*
234 ** AIM: Adaptive Interrupt Moderation,
235 ** which means that the interrupt rate
236 ** is varied over time based on the
237 ** traffic for that interrupt vector.
238 */
239 static int ixgbe_enable_aim = TRUE;
240 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
241     "Enable adaptive interrupt moderation");
242
243 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
244 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
245     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
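/*
** With the stock IXGBE_LOW_LATENCY value of 128 from ixgbe.h, this
** default works out to roughly 4000000 / 128 = 31250 interrupts per
** second per vector; the actual EITR programming derived from this
** rate happens later at interrupt setup time.
*/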
246
247 /* How many packets rxeof tries to clean at a time */
248 static int ixgbe_rx_process_limit = 256;
249 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
250 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
251     &ixgbe_rx_process_limit, 0,
252     "Maximum number of received packets to process at a time, "
253     "-1 means unlimited");
254
255 /* How many packets txeof tries to clean at a time */
256 static int ixgbe_tx_process_limit = 256;
257 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
258 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
259     &ixgbe_tx_process_limit, 0,
260     "Maximum number of sent packets to process at a time, "
261     "-1 means unlimited");
262
263 /*
264 ** Smart speed setting, default to on.
265 ** This only works as a compile option
266 ** right now, as it's set during attach;
267 ** set this to 'ixgbe_smart_speed_off' to
268 ** disable.
269 */
270 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
271
272 /*
273  * MSIX should be the default for best performance,
274  * but this allows it to be forced off for testing.
275  */
276 static int ixgbe_enable_msix = 1;
277 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
278     "Enable MSI-X interrupts");
279
280 /*
281  * Number of queues; if set to 0, it
282  * autoconfigures based on the number of
283  * CPUs, with a max of 8. This can be
284  * overridden manually here.
285  */
286 static int ixgbe_num_queues = 0;
287 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
288     "Number of queues to configure up to a maximum of 8; "
289     "0 indicates autoconfigure");
290
291 /*
292 ** Number of TX descriptors per ring,
293 ** set higher than RX as this seems to be
294 ** the better performing choice.
295 */
296 static int ixgbe_txd = PERFORM_TXD;
297 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
298     "Number of transmit descriptors per queue");
299
300 /* Number of RX descriptors per ring */
301 static int ixgbe_rxd = PERFORM_RXD;
302 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
303     "Number of receive descriptors per queue");
304
305 /*
306 ** Turning this on will allow the use
307 ** of unsupported SFP+ modules; note that
308 ** if you do so, you are on your own :)
309 */
310 static int allow_unsupported_sfp = FALSE;
311 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
312
313 /* Keep a running tab on them for a sanity check */
314 static int ixgbe_total_ports;
315
316 #ifdef IXGBE_FDIR
317 /* 
318 ** Flow Director actually 'steals'
319 ** part of the packet buffer as its
320 ** filter pool; this variable controls
321 ** how much it uses:
322 **  0 = 64K, 1 = 128K, 2 = 256K
323 */
324 static int fdir_pballoc = 1;
325 #endif
326
327 #ifdef DEV_NETMAP
328 /*
329  * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
330  * be a reference on how to implement netmap support in a driver.
331  * Additional comments are in ixgbe_netmap.h .
332  *
333  * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
334  * that extend the standard driver.
335  */
336 #include <dev/netmap/ixgbe_netmap.h>
337 #endif /* DEV_NETMAP */
338
339 /*********************************************************************
340  *  Device identification routine
341  *
342  *  ixgbe_probe determines if the driver should be loaded on
343  *  adapter based on PCI vendor/device id of the adapter.
344  *
345  *  return BUS_PROBE_DEFAULT on success, positive on failure
346  *********************************************************************/
347
348 static int
349 ixgbe_probe(device_t dev)
350 {
351         ixgbe_vendor_info_t *ent;
352
353         u16     pci_vendor_id = 0;
354         u16     pci_device_id = 0;
355         u16     pci_subvendor_id = 0;
356         u16     pci_subdevice_id = 0;
357         char    adapter_name[256];
358
359         INIT_DEBUGOUT("ixgbe_probe: begin");
360
361         pci_vendor_id = pci_get_vendor(dev);
362         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
363                 return (ENXIO);
364
365         pci_device_id = pci_get_device(dev);
366         pci_subvendor_id = pci_get_subvendor(dev);
367         pci_subdevice_id = pci_get_subdevice(dev);
368
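        /*
        ** Walk the device table; entries with a zero subvendor or
        ** subdevice ID act as wildcards for those fields, so only the
        ** vendor/device pair has to match exactly.
        */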
369         ent = ixgbe_vendor_info_array;
370         while (ent->vendor_id != 0) {
371                 if ((pci_vendor_id == ent->vendor_id) &&
372                     (pci_device_id == ent->device_id) &&
373
374                     ((pci_subvendor_id == ent->subvendor_id) ||
375                      (ent->subvendor_id == 0)) &&
376
377                     ((pci_subdevice_id == ent->subdevice_id) ||
378                      (ent->subdevice_id == 0))) {
379                         sprintf(adapter_name, "%s, Version - %s",
380                                 ixgbe_strings[ent->index],
381                                 ixgbe_driver_version);
382                         device_set_desc_copy(dev, adapter_name);
383                         ++ixgbe_total_ports;
384                         return (BUS_PROBE_DEFAULT);
385                 }
386                 ent++;
387         }
388         return (ENXIO);
389 }
390
391 /*********************************************************************
392  *  Device initialization routine
393  *
394  *  The attach entry point is called when the driver is being loaded.
395  *  This routine identifies the type of hardware, allocates all resources
396  *  and initializes the hardware.
397  *
398  *  return 0 on success, positive on failure
399  *********************************************************************/
400
401 static int
402 ixgbe_attach(device_t dev)
403 {
404         struct adapter *adapter;
405         struct ixgbe_hw *hw;
406         int             error = 0;
407         u16             csum;
408         u32             ctrl_ext;
409
410         INIT_DEBUGOUT("ixgbe_attach: begin");
411
412         /* Allocate, clear, and link in our adapter structure */
413         adapter = device_get_softc(dev);
414         adapter->dev = adapter->osdep.dev = dev;
415         hw = &adapter->hw;
416
417         /* Core Lock Init*/
418         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
419
420         /* Set up the timer callout */
421         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
422
423         /* Determine hardware revision */
424         ixgbe_identify_hardware(adapter);
425
426         /* Do base PCI setup - map BAR0 */
427         if (ixgbe_allocate_pci_resources(adapter)) {
428                 device_printf(dev, "Allocation of PCI resources failed\n");
429                 error = ENXIO;
430                 goto err_out;
431         }
432
433         /* Do descriptor calc and sanity checks */
434         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
435             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
436                 device_printf(dev, "TXD config issue, using default!\n");
437                 adapter->num_tx_desc = DEFAULT_TXD;
438         } else
439                 adapter->num_tx_desc = ixgbe_txd;
440
441         /*
442         ** With many RX rings it is easy to exceed the
443         ** system mbuf allocation. Tuning nmbclusters
444         ** can alleviate this.
445         */
446         if (nmbclusters > 0) {
447                 int s;
448                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
449                 if (s > nmbclusters) {
450                         device_printf(dev, "RX Descriptors exceed "
451                             "system mbuf max, using default instead!\n");
452                         ixgbe_rxd = DEFAULT_RXD;
453                 }
454         }
455
456         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
457             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
458                 device_printf(dev, "RXD config issue, using default!\n");
459                 adapter->num_rx_desc = DEFAULT_RXD;
460         } else
461                 adapter->num_rx_desc = ixgbe_rxd;
462
463         /* Allocate our TX/RX Queues */
464         if (ixgbe_allocate_queues(adapter)) {
465                 error = ENOMEM;
466                 goto err_out;
467         }
468
469         /* Allocate multicast array memory. */
470         adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
471             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
472         if (adapter->mta == NULL) {
473                 device_printf(dev, "Can not allocate multicast setup array\n");
474                 error = ENOMEM;
475                 goto err_late;
476         }
477
478         /* Initialize the shared code */
479         hw->allow_unsupported_sfp = allow_unsupported_sfp;
480         error = ixgbe_init_shared_code(hw);
481         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
482                 /*
483                 ** No optics in this port, set up
484                 ** so the timer routine will probe 
485                 ** for later insertion.
486                 */
487                 adapter->sfp_probe = TRUE;
488                 error = 0;
489         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
490                 device_printf(dev,"Unsupported SFP+ module detected!\n");
491                 error = EIO;
492                 goto err_late;
493         } else if (error) {
494                 device_printf(dev,"Unable to initialize the shared code\n");
495                 error = EIO;
496                 goto err_late;
497         }
498
499         /* Make sure we have a good EEPROM before we read from it */
500         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
501                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
502                 error = EIO;
503                 goto err_late;
504         }
505
506         error = ixgbe_init_hw(hw);
507         switch (error) {
508         case IXGBE_ERR_EEPROM_VERSION:
509                 device_printf(dev, "This device is a pre-production adapter/"
510                     "LOM.  Please be aware there may be issues associated "
511                     "with your hardware.\n If you are experiencing problems "
512                     "please contact your Intel or hardware representative "
513                     "who provided you with this hardware.\n");
514                 break;
515         case IXGBE_ERR_SFP_NOT_SUPPORTED:
516                 device_printf(dev,"Unsupported SFP+ Module\n");
517                 error = EIO;
518                 goto err_late;
519         case IXGBE_ERR_SFP_NOT_PRESENT:
520                 device_printf(dev,"No SFP+ Module found\n");
521                 /* falls thru */
522         default:
523                 break;
524         }
525
526         /* Detect and set physical type */
527         ixgbe_setup_optics(adapter);
528
529         if ((adapter->msix > 1) && (ixgbe_enable_msix))
530                 error = ixgbe_allocate_msix(adapter); 
531         else
532                 error = ixgbe_allocate_legacy(adapter); 
533         if (error) 
534                 goto err_late;
535
536         /* Setup OS specific network interface */
537         if (ixgbe_setup_interface(dev, adapter) != 0)
538                 goto err_late;
539
540         /* Initialize statistics */
541         ixgbe_update_stats_counters(adapter);
542
543         /* Register for VLAN events */
544         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
545             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
546         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
547             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
548
549         /* Check PCIE slot type/speed/width */
550         ixgbe_get_slot_info(hw);
551
552         /* Set an initial default flow control value */
553         adapter->fc = ixgbe_fc_full;
554
555         /* Check for certain supported features */
556         ixgbe_check_wol_support(adapter);
557         ixgbe_check_eee_support(adapter);
558
559         /* Add sysctls */
560         ixgbe_add_device_sysctls(adapter);
561         ixgbe_add_hw_stats(adapter);
562
563         /* let hardware know driver is loaded */
564         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
565         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
566         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
567
568 #ifdef DEV_NETMAP
569         ixgbe_netmap_attach(adapter);
570 #endif /* DEV_NETMAP */
571         INIT_DEBUGOUT("ixgbe_attach: end");
572         return (0);
573
574 err_late:
575         ixgbe_free_transmit_structures(adapter);
576         ixgbe_free_receive_structures(adapter);
577 err_out:
578         if (adapter->ifp != NULL)
579                 if_free(adapter->ifp);
580         ixgbe_free_pci_resources(adapter);
581         free(adapter->mta, M_DEVBUF);
582         return (error);
583 }
584
585 /*********************************************************************
586  *  Device removal routine
587  *
588  *  The detach entry point is called when the driver is being removed.
589  *  This routine stops the adapter and deallocates all the resources
590  *  that were allocated for driver operation.
591  *
592  *  return 0 on success, positive on failure
593  *********************************************************************/
594
595 static int
596 ixgbe_detach(device_t dev)
597 {
598         struct adapter *adapter = device_get_softc(dev);
599         struct ix_queue *que = adapter->queues;
600         struct tx_ring *txr = adapter->tx_rings;
601         u32     ctrl_ext;
602
603         INIT_DEBUGOUT("ixgbe_detach: begin");
604
605         /* Make sure VLANS are not using driver */
606         if (adapter->ifp->if_vlantrunk != NULL) {
607                 device_printf(dev,"Vlan in use, detach first\n");
608                 return (EBUSY);
609         }
610
611         /* Stop the adapter */
612         IXGBE_CORE_LOCK(adapter);
613         ixgbe_setup_low_power_mode(adapter);
614         IXGBE_CORE_UNLOCK(adapter);
615
616         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
617                 if (que->tq) {
618 #ifndef IXGBE_LEGACY_TX
619                         taskqueue_drain(que->tq, &txr->txq_task);
620 #endif
621                         taskqueue_drain(que->tq, &que->que_task);
622                         taskqueue_free(que->tq);
623                 }
624         }
625
626         /* Drain the Link queue */
627         if (adapter->tq) {
628                 taskqueue_drain(adapter->tq, &adapter->link_task);
629                 taskqueue_drain(adapter->tq, &adapter->mod_task);
630                 taskqueue_drain(adapter->tq, &adapter->msf_task);
631                 taskqueue_drain(adapter->tq, &adapter->phy_task);
632 #ifdef IXGBE_FDIR
633                 taskqueue_drain(adapter->tq, &adapter->fdir_task);
634 #endif
635                 taskqueue_free(adapter->tq);
636         }
637
638         /* let hardware know driver is unloading */
639         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
640         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
641         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
642
643         /* Unregister VLAN events */
644         if (adapter->vlan_attach != NULL)
645                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
646         if (adapter->vlan_detach != NULL)
647                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
648
649         ether_ifdetach(adapter->ifp);
650         callout_drain(&adapter->timer);
651 #ifdef DEV_NETMAP
652         netmap_detach(adapter->ifp);
653 #endif /* DEV_NETMAP */
654         ixgbe_free_pci_resources(adapter);
655         bus_generic_detach(dev);
656         if_free(adapter->ifp);
657
658         ixgbe_free_transmit_structures(adapter);
659         ixgbe_free_receive_structures(adapter);
660         free(adapter->mta, M_DEVBUF);
661
662         IXGBE_CORE_LOCK_DESTROY(adapter);
663         return (0);
664 }
665
666 /*********************************************************************
667  *
668  *  Shutdown entry point
669  *
670  **********************************************************************/
671
672 static int
673 ixgbe_shutdown(device_t dev)
674 {
675         struct adapter *adapter = device_get_softc(dev);
676         int error = 0;
677
678         INIT_DEBUGOUT("ixgbe_shutdown: begin");
679
680         IXGBE_CORE_LOCK(adapter);
681         error = ixgbe_setup_low_power_mode(adapter);
682         IXGBE_CORE_UNLOCK(adapter);
683
684         return (error);
685 }
686
687 /**
688  * Methods for going from:
689  * D0 -> D3: ixgbe_suspend
690  * D3 -> D0: ixgbe_resume
691  */
692 static int
693 ixgbe_suspend(device_t dev)
694 {
695         struct adapter *adapter = device_get_softc(dev);
696         int error = 0;
697
698         INIT_DEBUGOUT("ixgbe_suspend: begin");
699
700         IXGBE_CORE_LOCK(adapter);
701
702         error = ixgbe_setup_low_power_mode(adapter);
703
704         /* Save state and power down */
705         pci_save_state(dev);
706         pci_set_powerstate(dev, PCI_POWERSTATE_D3);
707
708         IXGBE_CORE_UNLOCK(adapter);
709
710         return (error);
711 }
712
713 static int
714 ixgbe_resume(device_t dev)
715 {
716         struct adapter *adapter = device_get_softc(dev);
717         struct ifnet *ifp = adapter->ifp;
718         struct ixgbe_hw *hw = &adapter->hw;
719         u32 wus;
720
721         INIT_DEBUGOUT("ixgbe_resume: begin");
722
723         IXGBE_CORE_LOCK(adapter);
724
725         pci_set_powerstate(dev, PCI_POWERSTATE_D0);
726         pci_restore_state(dev);
727
728         /* Read & clear WUS register */
729         wus = IXGBE_READ_REG(hw, IXGBE_WUS);
730         if (wus)
731                 device_printf(dev, "Woken up by (WUS): %#010x\n",
732                     IXGBE_READ_REG(hw, IXGBE_WUS));
733         IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
734         /* And clear WUFC until next low-power transition */
735         IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
736
737         /*
738          * Required after D3->D0 transition;
739          * will re-advertise all previous advertised speeds
740          */
741         if (ifp->if_flags & IFF_UP)
742                 ixgbe_init_locked(adapter);
743
744         IXGBE_CORE_UNLOCK(adapter);
745
746         INIT_DEBUGOUT("ixgbe_resume: end");
747         return (0);
748 }
749
750
751 /*********************************************************************
752  *  Ioctl entry point
753  *
754  *  ixgbe_ioctl is called when the user wants to configure the
755  *  interface.
756  *
757  *  return 0 on success, positive on failure
758  **********************************************************************/
759
760 static int
761 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
762 {
763         struct adapter  *adapter = ifp->if_softc;
764         struct ifreq    *ifr = (struct ifreq *) data;
765 #if defined(INET) || defined(INET6)
766         struct ifaddr *ifa = (struct ifaddr *)data;
767         bool            avoid_reset = FALSE;
768 #endif
769         int             error = 0;
770
771         switch (command) {
772
773         case SIOCSIFADDR:
774 #ifdef INET
775                 if (ifa->ifa_addr->sa_family == AF_INET)
776                         avoid_reset = TRUE;
777 #endif
778 #ifdef INET6
779                 if (ifa->ifa_addr->sa_family == AF_INET6)
780                         avoid_reset = TRUE;
781 #endif
782 #if defined(INET) || defined(INET6)
783                 /*
784                 ** Calling init results in link renegotiation,
785                 ** so we avoid doing it when possible.
786                 */
787                 if (avoid_reset) {
788                         ifp->if_flags |= IFF_UP;
789                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
790                                 ixgbe_init(adapter);
791                         if (!(ifp->if_flags & IFF_NOARP))
792                                 arp_ifinit(ifp, ifa);
793                 } else
794                         error = ether_ioctl(ifp, command, data);
795 #endif
796                 break;
797         case SIOCSIFMTU:
798                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
799                 if (ifr->ifr_mtu > IXGBE_MAX_MTU) {
800                         error = EINVAL;
801                 } else {
802                         IXGBE_CORE_LOCK(adapter);
803                         ifp->if_mtu = ifr->ifr_mtu;
804                         adapter->max_frame_size =
805                                 ifp->if_mtu + IXGBE_MTU_HDR;
806                         ixgbe_init_locked(adapter);
807                         IXGBE_CORE_UNLOCK(adapter);
808                 }
809                 break;
810         case SIOCSIFFLAGS:
811                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
812                 IXGBE_CORE_LOCK(adapter);
813                 if (ifp->if_flags & IFF_UP) {
814                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
815                                 if ((ifp->if_flags ^ adapter->if_flags) &
816                                     (IFF_PROMISC | IFF_ALLMULTI)) {
817                                         ixgbe_set_promisc(adapter);
818                                 }
819                         } else
820                                 ixgbe_init_locked(adapter);
821                 } else
822                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
823                                 ixgbe_stop(adapter);
824                 adapter->if_flags = ifp->if_flags;
825                 IXGBE_CORE_UNLOCK(adapter);
826                 break;
827         case SIOCADDMULTI:
828         case SIOCDELMULTI:
829                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
830                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
831                         IXGBE_CORE_LOCK(adapter);
832                         ixgbe_disable_intr(adapter);
833                         ixgbe_set_multi(adapter);
834                         ixgbe_enable_intr(adapter);
835                         IXGBE_CORE_UNLOCK(adapter);
836                 }
837                 break;
838         case SIOCSIFMEDIA:
839         case SIOCGIFMEDIA:
840                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
841                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
842                 break;
843         case SIOCSIFCAP:
844         {
845                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
846                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
847                 if (mask & IFCAP_HWCSUM)
848                         ifp->if_capenable ^= IFCAP_HWCSUM;
849                 if (mask & IFCAP_TSO4)
850                         ifp->if_capenable ^= IFCAP_TSO4;
851                 if (mask & IFCAP_TSO6)
852                         ifp->if_capenable ^= IFCAP_TSO6;
853                 if (mask & IFCAP_LRO)
854                         ifp->if_capenable ^= IFCAP_LRO;
855                 if (mask & IFCAP_VLAN_HWTAGGING)
856                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
857                 if (mask & IFCAP_VLAN_HWFILTER)
858                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
859                 if (mask & IFCAP_VLAN_HWTSO)
860                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
861                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
862                         IXGBE_CORE_LOCK(adapter);
863                         ixgbe_init_locked(adapter);
864                         IXGBE_CORE_UNLOCK(adapter);
865                 }
866                 VLAN_CAPABILITIES(ifp);
867                 break;
868         }
869 #if __FreeBSD_version >= 1100036
870         case SIOCGI2C:
871         {
872                 struct ixgbe_hw *hw = &adapter->hw;
873                 struct ifi2creq i2c;
874                 int i;
875                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
876                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
877                 if (error != 0)
878                         break;
879                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
880                         error = EINVAL;
881                         break;
882                 }
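                /*
                ** Only the standard SFP+ module addresses are accepted
                ** above: 0xA0 (identification) and 0xA2 (diagnostics),
                ** per SFF-8472.
                */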
883                 if (i2c.len > sizeof(i2c.data)) {
884                         error = EINVAL;
885                         break;
886                 }
887
888                 for (i = 0; i < i2c.len; i++)
889                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
890                             i2c.dev_addr, &i2c.data[i]);
891                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
892                 break;
893         }
894 #endif
895         default:
896                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
897                 error = ether_ioctl(ifp, command, data);
898                 break;
899         }
900
901         return (error);
902 }
903
904 /*********************************************************************
905  *  Init entry point
906  *
907  *  This routine is used in two ways. It is used by the stack as
908  *  init entry point in network interface structure. It is also used
909  *  by the driver as a hw/sw initialization routine to get to a
910  *  consistent state.
911  *
912  *  return 0 on success, positive on failure
913  **********************************************************************/
914 #define IXGBE_MHADD_MFS_SHIFT 16
915
916 static void
917 ixgbe_init_locked(struct adapter *adapter)
918 {
919         struct ifnet   *ifp = adapter->ifp;
920         device_t        dev = adapter->dev;
921         struct ixgbe_hw *hw = &adapter->hw;
922         u32             k, txdctl, mhadd, gpie;
923         u32             rxdctl, rxctrl;
924
925         mtx_assert(&adapter->core_mtx, MA_OWNED);
926         INIT_DEBUGOUT("ixgbe_init_locked: begin");
927         hw->adapter_stopped = FALSE;
928         ixgbe_stop_adapter(hw);
929         callout_stop(&adapter->timer);
930
931         /* reprogram the RAR[0] in case user changed it. */
932         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
933
934         /* Get the latest mac address, User can use a LAA */
935         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
936               IXGBE_ETH_LENGTH_OF_ADDRESS);
937         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
938         hw->addr_ctrl.rar_used_count = 1;
939
940         /* Set the various hardware offload abilities */
941         ifp->if_hwassist = 0;
942         if (ifp->if_capenable & IFCAP_TSO)
943                 ifp->if_hwassist |= CSUM_TSO;
944         if (ifp->if_capenable & IFCAP_TXCSUM) {
945                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
946 #if __FreeBSD_version >= 800000
947                 if (hw->mac.type != ixgbe_mac_82598EB)
948                         ifp->if_hwassist |= CSUM_SCTP;
949 #endif
950         }
951
952         /* Prepare transmit descriptors and buffers */
953         if (ixgbe_setup_transmit_structures(adapter)) {
954                 device_printf(dev, "Could not setup transmit structures\n");
955                 ixgbe_stop(adapter);
956                 return;
957         }
958
959         ixgbe_init_hw(hw);
960         ixgbe_initialize_transmit_units(adapter);
961
962         /* Setup Multicast table */
963         ixgbe_set_multi(adapter);
964
965         /*
966         ** Determine the correct mbuf pool
967         ** for doing jumbo frames
968         */
969         if (adapter->max_frame_size <= 2048)
970                 adapter->rx_mbuf_sz = MCLBYTES;
971         else if (adapter->max_frame_size <= 4096)
972                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
973         else if (adapter->max_frame_size <= 9216)
974                 adapter->rx_mbuf_sz = MJUM9BYTES;
975         else
976                 adapter->rx_mbuf_sz = MJUM16BYTES;
977
978         /* Prepare receive descriptors and buffers */
979         if (ixgbe_setup_receive_structures(adapter)) {
980                 device_printf(dev, "Could not setup receive structures\n");
981                 ixgbe_stop(adapter);
982                 return;
983         }
984
985         /* Configure RX settings */
986         ixgbe_initialize_receive_units(adapter);
987
988         gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
989
990         /* Enable Fan Failure Interrupt */
991         gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
992
993         /* Add for Module detection */
994         if (hw->mac.type == ixgbe_mac_82599EB)
995                 gpie |= IXGBE_SDP2_GPIEN;
996
997         /*
998          * Thermal Failure Detection (X540)
999          * Link Detection (X552)
1000          */
1001         if (hw->mac.type == ixgbe_mac_X540 ||
1002             hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1003             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
1004                 gpie |= IXGBE_SDP0_GPIEN_X540;
1005
1006         if (adapter->msix > 1) {
1007                 /* Enable Enhanced MSIX mode */
1008                 gpie |= IXGBE_GPIE_MSIX_MODE;
1009                 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
1010                     IXGBE_GPIE_OCD;
1011         }
1012         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1013
1014         /* Set MTU size */
1015         if (ifp->if_mtu > ETHERMTU) {
1016                 /* aka IXGBE_MAXFRS on 82599 and newer */
1017                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1018                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1019                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
1020                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1021         }
1022         
1023         /* Now enable all the queues */
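        /*
        ** TXDCTL field layout on these MACs (per the 82599/X540
        ** datasheets): PTHRESH in bits 6:0, HTHRESH in bits 14:8 and
        ** WTHRESH in bits 22:16, which is why the threshold values
        ** below are OR'd in at shifts 0, 8 and 16.
        */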
1024         for (int i = 0; i < adapter->num_queues; i++) {
1025                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
1026                 txdctl |= IXGBE_TXDCTL_ENABLE;
1027                 /* Set WTHRESH to 8, burst writeback */
1028                 txdctl |= (8 << 16);
1029                 /*
1030                  * When the internal queue falls below PTHRESH (32),
1031                  * start prefetching as long as there are at least
1032                  * HTHRESH (1) buffers ready. The values are taken
1033                  * from the Intel linux driver 3.8.21.
1034                  * Prefetching enables tx line rate even with 1 queue.
1035                  */
1036                 txdctl |= (32 << 0) | (1 << 8);
1037                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
1038         }
1039
1040         for (int i = 0; i < adapter->num_queues; i++) {
1041                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1042                 if (hw->mac.type == ixgbe_mac_82598EB) {
1043                         /*
1044                         ** PTHRESH = 21
1045                         ** HTHRESH = 4
1046                         ** WTHRESH = 8
1047                         */
1048                         rxdctl &= ~0x3FFFFF;
1049                         rxdctl |= 0x080420;
1050                 }
1051                 rxdctl |= IXGBE_RXDCTL_ENABLE;
1052                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
1053                 for (k = 0; k < 10; k++) {
1054                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
1055                             IXGBE_RXDCTL_ENABLE)
1056                                 break;
1057                         else
1058                                 msec_delay(1);
1059                 }
1060                 wmb();
1061 #ifdef DEV_NETMAP
1062                 /*
1063                  * In netmap mode, we must preserve the buffers made
1064                  * available to userspace before the if_init()
1065                  * (this is true by default on the TX side, because
1066                  * init makes all buffers available to userspace).
1067                  *
1068                  * netmap_reset() and the device specific routines
1069                  * (e.g. ixgbe_setup_receive_rings()) map these
1070                  * buffers at the end of the NIC ring, so here we
1071                  * must set the RDT (tail) register to make sure
1072                  * they are not overwritten.
1073                  *
1074                  * In this driver the NIC ring starts at RDH = 0,
1075                  * RDT points to the last slot available for reception (?),
1076                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1077                  */
1078                 if (ifp->if_capenable & IFCAP_NETMAP) {
1079                         struct netmap_adapter *na = NA(adapter->ifp);
1080                         struct netmap_kring *kring = &na->rx_rings[i];
1081                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1082
1083                         IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1084                 } else
1085 #endif /* DEV_NETMAP */
1086                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1087         }
1088
1089         /* Enable Receive engine */
1090         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1091         if (hw->mac.type == ixgbe_mac_82598EB)
1092                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1093         rxctrl |= IXGBE_RXCTRL_RXEN;
1094         ixgbe_enable_rx_dma(hw, rxctrl);
1095
1096         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1097
1098         /* Set up MSI/X routing */
1099         if (ixgbe_enable_msix)  {
1100                 ixgbe_configure_ivars(adapter);
1101                 /* Set up auto-mask */
1102                 if (hw->mac.type == ixgbe_mac_82598EB)
1103                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1104                 else {
1105                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1106                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1107                 }
1108         } else {  /* Simple settings for Legacy/MSI */
1109                 ixgbe_set_ivar(adapter, 0, 0, 0);
1110                 ixgbe_set_ivar(adapter, 0, 0, 1);
1111                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1112         }
1113
1114 #ifdef IXGBE_FDIR
1115         /* Init Flow director */
1116         if (hw->mac.type != ixgbe_mac_82598EB) {
1117                 u32 hdrm = 32 << fdir_pballoc;
1118
1119                 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1120                 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1121         }
1122 #endif
1123
1124         /*
1125         ** Check on any SFP devices that
1126         ** need to be kick-started
1127         */
1128         if (hw->phy.type == ixgbe_phy_none) {
1129                 int err = hw->phy.ops.identify(hw);
1130                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1131                         device_printf(dev,
1132                             "Unsupported SFP+ module type was detected.\n");
1133                         return;
1134                 }
1135         }
1136
1137         /* Set moderation on the Link interrupt */
1138         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1139
1140         /* Configure Energy Efficient Ethernet for supported devices */
1141         if (adapter->eee_support)
1142                 ixgbe_setup_eee(hw, adapter->eee_enabled);
1143
1144         /* Config/Enable Link */
1145         ixgbe_config_link(adapter);
1146
1147         /* Hardware Packet Buffer & Flow Control setup */
1148         ixgbe_config_delay_values(adapter);
1149
1150         /* Initialize the FC settings */
1151         ixgbe_start_hw(hw);
1152
1153         /* Set up VLAN support and filter */
1154         ixgbe_setup_vlan_hw_support(adapter);
1155
1156         /* Setup DMA Coalescing */
1157         ixgbe_config_dmac(adapter);
1158
1159         /* And now turn on interrupts */
1160         ixgbe_enable_intr(adapter);
1161
1162         /* Now inform the stack we're ready */
1163         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1164
1165         return;
1166 }
1167
1168 static void
1169 ixgbe_init(void *arg)
1170 {
1171         struct adapter *adapter = arg;
1172
1173         IXGBE_CORE_LOCK(adapter);
1174         ixgbe_init_locked(adapter);
1175         IXGBE_CORE_UNLOCK(adapter);
1176         return;
1177 }
1178
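/*
** Compute the flow control watermarks: the DV macros give the required
** delay value in bits for the frame size, IXGBE_BT2KB converts that to
** kilobytes, and the high watermark is the Rx packet buffer size
** (RXPBSIZE, in KB) minus that headroom.
*/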
1179 static void
1180 ixgbe_config_delay_values(struct adapter *adapter)
1181 {
1182         struct ixgbe_hw *hw = &adapter->hw;
1183         u32 rxpb, frame, size, tmp;
1184
1185         frame = adapter->max_frame_size;
1186
1187         /* Calculate High Water */
1188         switch (hw->mac.type) {
1189         case ixgbe_mac_X540:
1190         case ixgbe_mac_X550:
1191         case ixgbe_mac_X550EM_x:
1192                 tmp = IXGBE_DV_X540(frame, frame);
1193                 break;
1194         default:
1195                 tmp = IXGBE_DV(frame, frame);
1196                 break;
1197         }
1198         size = IXGBE_BT2KB(tmp);
1199         rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1200         hw->fc.high_water[0] = rxpb - size;
1201
1202         /* Now calculate Low Water */
1203         switch (hw->mac.type) {
1204         case ixgbe_mac_X540:
1205         case ixgbe_mac_X550:
1206         case ixgbe_mac_X550EM_x:
1207                 tmp = IXGBE_LOW_DV_X540(frame);
1208                 break;
1209         default:
1210                 tmp = IXGBE_LOW_DV(frame);
1211                 break;
1212         }
1213         hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1214
1215         hw->fc.requested_mode = adapter->fc;
1216         hw->fc.pause_time = IXGBE_FC_PAUSE;
1217         hw->fc.send_xon = TRUE;
1218 }
1219
1220 /*
1221 **
1222 ** MSIX Interrupt Handlers and Tasklets
1223 **
1224 */
1225
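/*
** On the 82598 the queue interrupt bits live in the single EIMS/EIMC
** registers (masked with IXGBE_EIMS_RTX_QUEUE); on later MACs the 64
** possible MSI-X causes are split across the EIMS_EX/EIMC_EX register
** pairs, hence the separate low/high 32-bit handling below.
*/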
1226 static inline void
1227 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1228 {
1229         struct ixgbe_hw *hw = &adapter->hw;
1230         u64     queue = (u64)1 << vector;
1231         u32     mask;
1232
1233         if (hw->mac.type == ixgbe_mac_82598EB) {
1234                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1235                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1236         } else {
1237                 mask = (queue & 0xFFFFFFFF);
1238                 if (mask)
1239                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1240                 mask = (queue >> 32);
1241                 if (mask)
1242                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1243         }
1244 }
1245
1246 static inline void
1247 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1248 {
1249         struct ixgbe_hw *hw = &adapter->hw;
1250         u64     queue = (u64)1 << vector;
1251         u32     mask;
1252
1253         if (hw->mac.type == ixgbe_mac_82598EB) {
1254                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1255                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1256         } else {
1257                 mask = (queue & 0xFFFFFFFF);
1258                 if (mask)
1259                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1260                 mask = (queue >> 32);
1261                 if (mask)
1262                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1263         }
1264 }
1265
1266 static void
1267 ixgbe_handle_que(void *context, int pending)
1268 {
1269         struct ix_queue *que = context;
1270         struct adapter  *adapter = que->adapter;
1271         struct tx_ring  *txr = que->txr;
1272         struct ifnet    *ifp = adapter->ifp;
1273         bool            more;
1274
1275         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1276                 more = ixgbe_rxeof(que);
1277                 IXGBE_TX_LOCK(txr);
1278                 ixgbe_txeof(txr);
1279 #ifndef IXGBE_LEGACY_TX
1280                 if (!drbr_empty(ifp, txr->br))
1281                         ixgbe_mq_start_locked(ifp, txr);
1282 #else
1283                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1284                         ixgbe_start_locked(txr, ifp);
1285 #endif
1286                 IXGBE_TX_UNLOCK(txr);
1287         }
1288
1289         /* Reenable this interrupt */
1290         if (que->res != NULL)
1291                 ixgbe_enable_queue(adapter, que->msix);
1292         else
1293                 ixgbe_enable_intr(adapter);
1294         return;
1295 }
1296
1297
1298 /*********************************************************************
1299  *
1300  *  Legacy Interrupt Service routine
1301  *
1302  **********************************************************************/
1303
1304 static void
1305 ixgbe_legacy_irq(void *arg)
1306 {
1307         struct ix_queue *que = arg;
1308         struct adapter  *adapter = que->adapter;
1309         struct ixgbe_hw *hw = &adapter->hw;
1310         struct ifnet    *ifp = adapter->ifp;
1311         struct          tx_ring *txr = adapter->tx_rings;
1312         bool            more;
1313         u32             reg_eicr;
1314
1315
1316         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1317
1318         ++que->irqs;
1319         if (reg_eicr == 0) {
1320                 ixgbe_enable_intr(adapter);
1321                 return;
1322         }
1323
1324         more = ixgbe_rxeof(que);
1325
1326         IXGBE_TX_LOCK(txr);
1327         ixgbe_txeof(txr);
1328 #ifdef IXGBE_LEGACY_TX
1329         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1330                 ixgbe_start_locked(txr, ifp);
1331 #else
1332         if (!drbr_empty(ifp, txr->br))
1333                 ixgbe_mq_start_locked(ifp, txr);
1334 #endif
1335         IXGBE_TX_UNLOCK(txr);
1336
1337         /* Check for fan failure */
1338         if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1339             (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1340                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1341                     "REPLACE IMMEDIATELY!!\n");
1342                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1343         }
1344
1345         /* Link status change */
1346         if (reg_eicr & IXGBE_EICR_LSC)
1347                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1348
1349         /* External PHY interrupt */
1350         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1351             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540))
1352                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1353
1354         if (more)
1355                 taskqueue_enqueue(que->tq, &que->que_task);
1356         else
1357                 ixgbe_enable_intr(adapter);
1358         return;
1359 }
1360
1361
1362 /*********************************************************************
1363  *
1364  *  MSIX Queue Interrupt Service routine
1365  *
1366  **********************************************************************/
1367 void
1368 ixgbe_msix_que(void *arg)
1369 {
1370         struct ix_queue *que = arg;
1371         struct adapter  *adapter = que->adapter;
1372         struct ifnet    *ifp = adapter->ifp;
1373         struct tx_ring  *txr = que->txr;
1374         struct rx_ring  *rxr = que->rxr;
1375         bool            more;
1376         u32             newitr = 0;
1377
1378         /* Protect against spurious interrupts */
1379         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1380                 return;
1381
1382         ixgbe_disable_queue(adapter, que->msix);
1383         ++que->irqs;
1384
1385         more = ixgbe_rxeof(que);
1386
1387         IXGBE_TX_LOCK(txr);
1388         ixgbe_txeof(txr);
1389 #ifdef IXGBE_LEGACY_TX
1390         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1391                 ixgbe_start_locked(txr, ifp);
1392 #else
1393         if (!drbr_empty(ifp, txr->br))
1394                 ixgbe_mq_start_locked(ifp, txr);
1395 #endif
1396         IXGBE_TX_UNLOCK(txr);
1397
1398         /* Do AIM now? */
1399
1400         if (ixgbe_enable_aim == FALSE)
1401                 goto no_calc;
1402         /*
1403         ** Do Adaptive Interrupt Moderation:
1404         **  - Write out last calculated setting
1405         **  - Calculate based on average size over
1406         **    the last interval.
1407         */
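        /*
        ** The average bytes/packet seen since the last interrupt is a
        ** rough proxy for traffic type: bulk traffic (large averages)
        ** yields a larger EITR interval and thus a lower interrupt
        ** rate, while small packets keep the interval short for latency.
        */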
1408         if (que->eitr_setting)
1409                 IXGBE_WRITE_REG(&adapter->hw,
1410                     IXGBE_EITR(que->msix), que->eitr_setting);
1411  
1412         que->eitr_setting = 0;
1413
1414         /* Idle, do nothing */
1415         if ((txr->bytes == 0) && (rxr->bytes == 0))
1416                 goto no_calc;
1417                                 
1418         if ((txr->bytes) && (txr->packets))
1419                 newitr = txr->bytes/txr->packets;
1420         if ((rxr->bytes) && (rxr->packets))
1421                 newitr = max(newitr,
1422                     (rxr->bytes / rxr->packets));
1423         newitr += 24; /* account for hardware frame, crc */
1424
1425         /* set an upper boundary */
1426         newitr = min(newitr, 3000);
1427
1428         /* Be nice to the mid range */
1429         if ((newitr > 300) && (newitr < 1200))
1430                 newitr = (newitr / 3);
1431         else
1432                 newitr = (newitr / 2);
1433
1434         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1435                 newitr |= newitr << 16;
1436         else
1437                 newitr |= IXGBE_EITR_CNT_WDIS;
1438                  
1439         /* save for next interrupt */
1440         que->eitr_setting = newitr;
1441
1442         /* Reset state */
1443         txr->bytes = 0;
1444         txr->packets = 0;
1445         rxr->bytes = 0;
1446         rxr->packets = 0;
1447
1448 no_calc:
1449         if (more)
1450                 taskqueue_enqueue(que->tq, &que->que_task);
1451         else
1452                 ixgbe_enable_queue(adapter, que->msix);
1453         return;
1454 }
1455
1456
1457 static void
1458 ixgbe_msix_link(void *arg)
1459 {
1460         struct adapter  *adapter = arg;
1461         struct ixgbe_hw *hw = &adapter->hw;
1462         u32             reg_eicr, mod_mask;
1463
1464         ++adapter->link_irq;
1465
1466         /* First get the cause */
1467         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1468         /* Be sure the queue bits are not cleared */
1469         reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1470         /* Clear interrupt with write */
1471         IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1472
1473         /* Link status change */
1474         if (reg_eicr & IXGBE_EICR_LSC)
1475                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1476
1477         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1478 #ifdef IXGBE_FDIR
1479                 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1480                         /* This is probably overkill :) */
1481                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1482                                 return;
1483                         /* Disable the interrupt */
1484                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1485                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1486                 } else
1487 #endif
1488                 if (reg_eicr & IXGBE_EICR_ECC) {
1489                         device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1490                             "Please Reboot!!\n");
1491                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1492                 }
1493
1494                 /* Check for over temp condition */
1495                 if (reg_eicr & IXGBE_EICR_TS) {
1496                         device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1497                             "PHY IS SHUT DOWN!!\n");
1498                         device_printf(adapter->dev, "System shutdown required!\n");
1499                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1500                 }
1501         }
1502
1503         /* Pluggable optics-related interrupt */
1504         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1505                 mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1506         else
1507                 mod_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
1508
1509         if (ixgbe_is_sfp(hw)) {
1510                 if (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
1511                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1512                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1513                 } else if (reg_eicr & mod_mask) {
1514                         IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1515                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1516                 }
1517         }
1518
1519         /* Check for fan failure */
1520         if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1521             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1522                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1523                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1524                     "REPLACE IMMEDIATELY!!\n");
1525         }
1526
1527         /* External PHY interrupt */
1528         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1529             (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1530                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1531                 taskqueue_enqueue(adapter->tq, &adapter->phy_task);
1532         }
1533
1534         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1535         return;
1536 }
1537
1538 /*********************************************************************
1539  *
1540  *  Media Ioctl callback
1541  *
1542  *  This routine is called whenever the user queries the status of
1543  *  the interface using ifconfig.
1544  *
1545  **********************************************************************/
1546 static void
1547 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1548 {
1549         struct adapter *adapter = ifp->if_softc;
1550         struct ixgbe_hw *hw = &adapter->hw;
1551         int layer;
1552
1553         INIT_DEBUGOUT("ixgbe_media_status: begin");
1554         IXGBE_CORE_LOCK(adapter);
1555         ixgbe_update_link_status(adapter);
1556
1557         ifmr->ifm_status = IFM_AVALID;
1558         ifmr->ifm_active = IFM_ETHER;
1559
1560         if (!adapter->link_active) {
1561                 IXGBE_CORE_UNLOCK(adapter);
1562                 return;
1563         }
1564
1565         ifmr->ifm_status |= IFM_ACTIVE;
1566         layer = ixgbe_get_supported_physical_layer(hw);
1567
1568         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1569             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1570             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1571                 switch (adapter->link_speed) {
1572                 case IXGBE_LINK_SPEED_10GB_FULL:
1573                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1574                         break;
1575                 case IXGBE_LINK_SPEED_1GB_FULL:
1576                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1577                         break;
1578                 case IXGBE_LINK_SPEED_100_FULL:
1579                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1580                         break;
1581                 }
1582         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1583             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1584                 switch (adapter->link_speed) {
1585                 case IXGBE_LINK_SPEED_10GB_FULL:
1586                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1587                         break;
1588                 }
1589         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1590                 switch (adapter->link_speed) {
1591                 case IXGBE_LINK_SPEED_10GB_FULL:
1592                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1593                         break;
1594                 case IXGBE_LINK_SPEED_1GB_FULL:
1595                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1596                         break;
1597                 }
1598         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1599                 switch (adapter->link_speed) {
1600                 case IXGBE_LINK_SPEED_10GB_FULL:
1601                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1602                         break;
1603                 case IXGBE_LINK_SPEED_1GB_FULL:
1604                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1605                         break;
1606                 }
1607         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1608             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1609                 switch (adapter->link_speed) {
1610                 case IXGBE_LINK_SPEED_10GB_FULL:
1611                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1612                         break;
1613                 case IXGBE_LINK_SPEED_1GB_FULL:
1614                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1615                         break;
1616                 }
1617         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1618                 switch (adapter->link_speed) {
1619                 case IXGBE_LINK_SPEED_10GB_FULL:
1620                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1621                         break;
1622                 }
1623         /*
1624         ** XXX: These need to use the proper media types once
1625         ** they're added.
1626         */
1627         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1628                 switch (adapter->link_speed) {
1629                 case IXGBE_LINK_SPEED_10GB_FULL:
1630                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1631                         break;
1632                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1633                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1634                         break;
1635                 case IXGBE_LINK_SPEED_1GB_FULL:
1636                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1637                         break;
1638                 }
1639         else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1640             || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1641                 switch (adapter->link_speed) {
1642                 case IXGBE_LINK_SPEED_10GB_FULL:
1643                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1644                         break;
1645                 case IXGBE_LINK_SPEED_2_5GB_FULL:
1646                         ifmr->ifm_active |= IFM_2500_SX | IFM_FDX;
1647                         break;
1648                 case IXGBE_LINK_SPEED_1GB_FULL:
1649                         ifmr->ifm_active |= IFM_1000_CX | IFM_FDX;
1650                         break;
1651                 }
1652         
1653         /* If nothing is recognized... */
1654         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1655                 ifmr->ifm_active |= IFM_UNKNOWN;
1656         
1657 #if __FreeBSD_version >= 900025
1658         /* Display current flow control setting used on link */
1659         if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
1660             hw->fc.current_mode == ixgbe_fc_full)
1661                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1662         if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
1663             hw->fc.current_mode == ixgbe_fc_full)
1664                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1665 #endif
1666
1667         IXGBE_CORE_UNLOCK(adapter);
1668
1669         return;
1670 }
1671
1672 /*********************************************************************
1673  *
1674  *  Media Ioctl callback
1675  *
1676  *  This routine is called when the user changes speed/duplex using
1677  *  media/mediaopt option with ifconfig.
1678  *
1679  **********************************************************************/
1680 static int
1681 ixgbe_media_change(struct ifnet * ifp)
1682 {
1683         struct adapter *adapter = ifp->if_softc;
1684         struct ifmedia *ifm = &adapter->media;
1685         struct ixgbe_hw *hw = &adapter->hw;
1686         ixgbe_link_speed speed = 0;
1687
1688         INIT_DEBUGOUT("ixgbe_media_change: begin");
1689
1690         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1691                 return (EINVAL);
1692
1693         if (hw->phy.media_type == ixgbe_media_type_backplane)
1694                 return (EPERM);
1695
1696         /*
1697         ** We don't actually need to check against the supported
1698         ** media types of the adapter; ifmedia will take care of
1699         ** that for us.
1700         */
1701         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1702                 case IFM_AUTO:
1703                 case IFM_10G_T:
1704                         speed |= IXGBE_LINK_SPEED_100_FULL;
1705                 case IFM_10G_LRM:
1706                 case IFM_10G_SR: /* KR, too */
1707                 case IFM_10G_LR:
1708                 case IFM_10G_CX4: /* KX4 */
1709                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1710                 case IFM_10G_TWINAX:
1711                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
1712                         break;
1713                 case IFM_1000_T:
1714                         speed |= IXGBE_LINK_SPEED_100_FULL;
1715                 case IFM_1000_LX:
1716                 case IFM_1000_SX:
1717                 case IFM_1000_CX: /* KX */
1718                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1719                         break;
1720                 case IFM_100_TX:
1721                         speed |= IXGBE_LINK_SPEED_100_FULL;
1722                         break;
1723                 default:
1724                         goto invalid;
1725         }
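        /*
        ** Note on the intentional fallthrough above (illustrative):
        ** "autoselect" or 10Gbase-T accumulates 100M | 1G | 10G;
        ** 10GbaseSR/LR/LRM/CX4 accumulate 1G | 10G; 10GbaseTwinax
        ** requests 10G only; 1000baseT accumulates 100M | 1G.
        */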
1726
1727         hw->mac.autotry_restart = TRUE;
1728         hw->mac.ops.setup_link(hw, speed, TRUE);
1729         adapter->advertise =
1730                 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1731                 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1732                 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1733
1734         return (0);
1735
1736 invalid:
1737         device_printf(adapter->dev, "Invalid media type!\n");
1738         return (EINVAL);
1739 }
1740
1741 static void
1742 ixgbe_set_promisc(struct adapter *adapter)
1743 {
1744         u_int32_t       reg_rctl;
1745         struct ifnet   *ifp = adapter->ifp;
1746         int             mcnt = 0;
1747
1748         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1749         reg_rctl &= (~IXGBE_FCTRL_UPE);
1750         if (ifp->if_flags & IFF_ALLMULTI)
1751                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1752         else {
1753                 struct  ifmultiaddr *ifma;
1754 #if __FreeBSD_version < 800000
1755                 IF_ADDR_LOCK(ifp);
1756 #else
1757                 if_maddr_rlock(ifp);
1758 #endif
1759                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1760                         if (ifma->ifma_addr->sa_family != AF_LINK)
1761                                 continue;
1762                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1763                                 break;
1764                         mcnt++;
1765                 }
1766 #if __FreeBSD_version < 800000
1767                 IF_ADDR_UNLOCK(ifp);
1768 #else
1769                 if_maddr_runlock(ifp);
1770 #endif
1771         }
1772         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1773                 reg_rctl &= (~IXGBE_FCTRL_MPE);
1774         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1775
1776         if (ifp->if_flags & IFF_PROMISC) {
1777                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1778                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1779         } else if (ifp->if_flags & IFF_ALLMULTI) {
1780                 reg_rctl |= IXGBE_FCTRL_MPE;
1781                 reg_rctl &= ~IXGBE_FCTRL_UPE;
1782                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1783         }
1784         return;
1785 }
1786
1787
1788 /*********************************************************************
1789  *  Multicast Update
1790  *
1791  *  This routine is called whenever the multicast address list is updated.
1792  *
1793  **********************************************************************/
1794 #define IXGBE_RAR_ENTRIES 16
1795
1796 static void
1797 ixgbe_set_multi(struct adapter *adapter)
1798 {
1799         u32     fctrl;
1800         u8      *mta;
1801         u8      *update_ptr;
1802         struct  ifmultiaddr *ifma;
1803         int     mcnt = 0;
1804         struct ifnet   *ifp = adapter->ifp;
1805
1806         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1807
1808         mta = adapter->mta;
1809         bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1810             MAX_NUM_MULTICAST_ADDRESSES);
1811
1812 #if __FreeBSD_version < 800000
1813         IF_ADDR_LOCK(ifp);
1814 #else
1815         if_maddr_rlock(ifp);
1816 #endif
1817         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1818                 if (ifma->ifma_addr->sa_family != AF_LINK)
1819                         continue;
1820                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1821                         break;
1822                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1823                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1824                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1825                 mcnt++;
1826         }
1827 #if __FreeBSD_version < 800000
1828         IF_ADDR_UNLOCK(ifp);
1829 #else
1830         if_maddr_runlock(ifp);
1831 #endif
1832
1833         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1834         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1835         if (ifp->if_flags & IFF_PROMISC)
1836                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1837         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1838             ifp->if_flags & IFF_ALLMULTI) {
1839                 fctrl |= IXGBE_FCTRL_MPE;
1840                 fctrl &= ~IXGBE_FCTRL_UPE;
1841         } else
1842                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1843         
1844         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1845
1846         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1847                 update_ptr = mta;
1848                 ixgbe_update_mc_addr_list(&adapter->hw,
1849                     update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1850         }
1851
1852         return;
1853 }
1854
1855 /*
1856  * This is an iterator function required by the multicast
1857  * shared code. It simply feeds the shared code routine the
1858  * addresses from the array built in ixgbe_set_multi(), one by one.
1859  */
1860 static u8 *
1861 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1862 {
1863         u8 *addr = *update_ptr;
1864         u8 *newptr;
1865         *vmdq = 0;
1866
1867         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1868         *update_ptr = newptr;
1869         return addr;
1870 }
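
/*
 * Illustrative fragment only (disabled): roughly how a caller such as
 * ixgbe_update_mc_addr_list() can consume this iterator, pulling one
 * 6-byte address per call and letting it advance the walk pointer.
 * The variable names below are hypothetical, not driver state.
 */
#if 0
        u8  *itr = mta;         /* flat array filled by ixgbe_set_multi() */
        u32  vmdq;

        for (int n = 0; n < mcnt; n++) {
                u8 *addr = ixgbe_mc_array_itr(&adapter->hw, &itr, &vmdq);
                /* program 'addr' into the next hardware multicast slot */
        }
#endif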
1871
1872
1873 /*********************************************************************
1874  *  Timer routine
1875  *
1876  *  This routine checks for link status, updates statistics,
1877  *  and runs the watchdog check.
1878  *
1879  **********************************************************************/
1880
1881 static void
1882 ixgbe_local_timer(void *arg)
1883 {
1884         struct adapter  *adapter = arg;
1885         device_t        dev = adapter->dev;
1886         struct ix_queue *que = adapter->queues;
1887         u64             queues = 0;
1888         int             hung = 0;
1889
1890         mtx_assert(&adapter->core_mtx, MA_OWNED);
1891
1892         /* Check for pluggable optics */
1893         if (adapter->sfp_probe)
1894                 if (!ixgbe_sfp_probe(adapter))
1895                         goto out; /* Nothing to do */
1896
1897         ixgbe_update_link_status(adapter);
1898         ixgbe_update_stats_counters(adapter);
1899
1900         /*
1901         ** Check the TX queues' status:
1902         **      - mark hung queues so we don't schedule on them
1903         **      - watchdog only if all queues show hung
1904         */
1905         for (int i = 0; i < adapter->num_queues; i++, que++) {
1906                 /* Keep track of queues with work for soft irq */
1907                 if (que->txr->busy)
1908                         queues |= ((u64)1 << que->me);
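                /*
                ** For example (illustrative): with queues 0 and 2 busy,
                ** 'queues' ends up as 0x5, and ixgbe_rearm_queues()
                ** below forces an interrupt on just those two queues.
                */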
1909                 /*
1910                 ** Each time txeof runs without cleaning, yet there
1911                 ** are uncleaned descriptors, it increments busy; once
1912                 ** busy reaches the MAX we declare the queue hung.
1913                 */
1914                 if (que->busy == IXGBE_QUEUE_HUNG) {
1915                         ++hung;
1916                         /* Mark the queue as inactive */
1917                         adapter->active_queues &= ~((u64)1 << que->me);
1918                         continue;
1919                 } else {
1920                         /* Check if we've come back from hung */
1921                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1922                                 adapter->active_queues |= ((u64)1 << que->me);
1923                 }
1924                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1925                         device_printf(dev,"Warning queue %d "
1926                             "appears to be hung!\n", i);
1927                         que->txr->busy = IXGBE_QUEUE_HUNG;
1928                         ++hung;
1929                 }
1930
1931         }
1932
1933         /* Only truly watchdog if all queues show hung */
1934         if (hung == adapter->num_queues)
1935                 goto watchdog;
1936         else if (queues != 0) { /* Force an IRQ on queues with work */
1937                 ixgbe_rearm_queues(adapter, queues);
1938         }
1939
1940 out:
1941         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1942         return;
1943
1944 watchdog:
1945         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1946         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1947         adapter->watchdog_events++;
1948         ixgbe_init_locked(adapter);
1949 }
1950
1951 /*
1952 ** Note: this routine updates the OS on the link state;
1953 **      the real check of the hardware only happens with
1954 **      a link interrupt.
1955 */
1956 static void
1957 ixgbe_update_link_status(struct adapter *adapter)
1958 {
1959         struct ifnet    *ifp = adapter->ifp;
1960         device_t dev = adapter->dev;
1961
1962         if (adapter->link_up){ 
1963                 if (adapter->link_active == FALSE) {
1964                         if (bootverbose)
1965                                 device_printf(dev,"Link is up %d Gbps %s \n",
1966                                     ((adapter->link_speed == 128)? 10:1),
1967                                     "Full Duplex");
1968                         adapter->link_active = TRUE;
1969                         /* Update any Flow Control changes */
1970                         ixgbe_fc_enable(&adapter->hw);
1971                         /* Update DMA coalescing config */
1972                         ixgbe_config_dmac(adapter);
1973                         if_link_state_change(ifp, LINK_STATE_UP);
1974                 }
1975         } else { /* Link down */
1976                 if (adapter->link_active == TRUE) {
1977                         if (bootverbose)
1978                                 device_printf(dev,"Link is Down\n");
1979                         if_link_state_change(ifp, LINK_STATE_DOWN);
1980                         adapter->link_active = FALSE;
1981                 }
1982         }
1983
1984         return;
1985 }
1986
1987
1988 /*********************************************************************
1989  *
1990  *  This routine disables all traffic on the adapter by issuing a
1991  *  global reset on the MAC and deallocates TX/RX buffers.
1992  *
1993  **********************************************************************/
1994
1995 static void
1996 ixgbe_stop(void *arg)
1997 {
1998         struct ifnet   *ifp;
1999         struct adapter *adapter = arg;
2000         struct ixgbe_hw *hw = &adapter->hw;
2001         ifp = adapter->ifp;
2002
2003         mtx_assert(&adapter->core_mtx, MA_OWNED);
2004
2005         INIT_DEBUGOUT("ixgbe_stop: begin\n");
2006         ixgbe_disable_intr(adapter);
2007         callout_stop(&adapter->timer);
2008
2009         /* Let the stack know...*/
2010         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2011
2012         ixgbe_reset_hw(hw);
2013         hw->adapter_stopped = FALSE;
2014         ixgbe_stop_adapter(hw);
2015         if (hw->mac.type == ixgbe_mac_82599EB)
2016                 ixgbe_stop_mac_link_on_d3_82599(hw);
2017         /* Turn off the laser - noop with no optics */
2018         ixgbe_disable_tx_laser(hw);
2019
2020         /* Update the stack */
2021         adapter->link_up = FALSE;
2022         ixgbe_update_link_status(adapter);
2023
2024         /* reprogram the RAR[0] in case user changed it. */
2025         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2026
2027         return;
2028 }
2029
2030
2031 /*********************************************************************
2032  *
2033  *  Determine hardware revision.
2034  *
2035  **********************************************************************/
2036 static void
2037 ixgbe_identify_hardware(struct adapter *adapter)
2038 {
2039         device_t        dev = adapter->dev;
2040         struct ixgbe_hw *hw = &adapter->hw;
2041
2042         /* Save off the information about this board */
2043         hw->vendor_id = pci_get_vendor(dev);
2044         hw->device_id = pci_get_device(dev);
2045         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2046         hw->subsystem_vendor_id =
2047             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2048         hw->subsystem_device_id =
2049             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2050
2051         /*
2052         ** Make sure BUSMASTER is set
2053         */
2054         pci_enable_busmaster(dev);
2055
2056         /* We need this here to set the num_segs below */
2057         ixgbe_set_mac_type(hw);
2058
2059         /* Pick up the 82599 settings */
2060         if (hw->mac.type != ixgbe_mac_82598EB) {
2061                 hw->phy.smart_speed = ixgbe_smart_speed;
2062                 adapter->num_segs = IXGBE_82599_SCATTER;
2063         } else
2064                 adapter->num_segs = IXGBE_82598_SCATTER;
2065
2066         return;
2067 }
2068
2069 /*********************************************************************
2070  *
2071  *  Determine optic type
2072  *
2073  **********************************************************************/
2074 static void
2075 ixgbe_setup_optics(struct adapter *adapter)
2076 {
2077         struct ixgbe_hw *hw = &adapter->hw;
2078         int             layer;
2079
2080         layer = ixgbe_get_supported_physical_layer(hw);
2081
2082         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2083                 adapter->optics = IFM_10G_T;
2084                 return;
2085         }
2086
2087         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2088                 adapter->optics = IFM_1000_T;
2089                 return;
2090         }
2091
2092         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
2093                 adapter->optics = IFM_1000_SX;
2094                 return;
2095         }
2096
2097         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2098             IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2099                 adapter->optics = IFM_10G_LR;
2100                 return;
2101         }
2102
2103         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2104                 adapter->optics = IFM_10G_SR;
2105                 return;
2106         }
2107
2108         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2109                 adapter->optics = IFM_10G_TWINAX;
2110                 return;
2111         }
2112
2113         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2114             IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2115                 adapter->optics = IFM_10G_CX4;
2116                 return;
2117         }
2118
2119         /* If we get here just set the default */
2120         adapter->optics = IFM_ETHER | IFM_AUTO;
2121         return;
2122 }
2123
2124 /*********************************************************************
2125  *
2126  *  Setup the Legacy or MSI Interrupt handler
2127  *
2128  **********************************************************************/
2129 static int
2130 ixgbe_allocate_legacy(struct adapter *adapter)
2131 {
2132         device_t        dev = adapter->dev;
2133         struct          ix_queue *que = adapter->queues;
2134 #ifndef IXGBE_LEGACY_TX
2135         struct tx_ring          *txr = adapter->tx_rings;
2136 #endif
2137         int             error, rid = 0;
2138
2139         /* MSI RID at 1 */
2140         if (adapter->msix == 1)
2141                 rid = 1;
2142
2143         /* We allocate a single interrupt resource */
2144         adapter->res = bus_alloc_resource_any(dev,
2145             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2146         if (adapter->res == NULL) {
2147                 device_printf(dev, "Unable to allocate bus resource: "
2148                     "interrupt\n");
2149                 return (ENXIO);
2150         }
2151
2152         /*
2153          * Try allocating a fast interrupt and the associated deferred
2154          * processing contexts.
2155          */
2156 #ifndef IXGBE_LEGACY_TX
2157         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2158 #endif
2159         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2160         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2161             taskqueue_thread_enqueue, &que->tq);
2162         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2163             device_get_nameunit(adapter->dev));
2164
2165         /* Tasklets for Link, SFP and Multispeed Fiber */
2166         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2167         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2168         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2169         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2170 #ifdef IXGBE_FDIR
2171         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2172 #endif
2173         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2174             taskqueue_thread_enqueue, &adapter->tq);
2175         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2176             device_get_nameunit(adapter->dev));
2177
2178         if ((error = bus_setup_intr(dev, adapter->res,
2179             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2180             que, &adapter->tag)) != 0) {
2181                 device_printf(dev, "Failed to register fast interrupt "
2182                     "handler: %d\n", error);
2183                 taskqueue_free(que->tq);
2184                 taskqueue_free(adapter->tq);
2185                 que->tq = NULL;
2186                 adapter->tq = NULL;
2187                 return (error);
2188         }
2189         /* For simplicity in the handlers */
2190         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2191
2192         return (0);
2193 }
2194
2195
2196 /*********************************************************************
2197  *
2198  *  Setup MSIX Interrupt resources and handlers 
2199  *
2200  **********************************************************************/
2201 static int
2202 ixgbe_allocate_msix(struct adapter *adapter)
2203 {
2204         device_t        dev = adapter->dev;
2205         struct          ix_queue *que = adapter->queues;
2206         struct          tx_ring *txr = adapter->tx_rings;
2207         int             error, rid, vector = 0;
2208         int             cpu_id = 0;
2209
2210         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2211                 rid = vector + 1;
2212                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2213                     RF_SHAREABLE | RF_ACTIVE);
2214                 if (que->res == NULL) {
2215                         device_printf(dev,"Unable to allocate"
2216                             " bus resource: que interrupt [%d]\n", vector);
2217                         return (ENXIO);
2218                 }
2219                 /* Set the handler function */
2220                 error = bus_setup_intr(dev, que->res,
2221                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2222                     ixgbe_msix_que, que, &que->tag);
2223                 if (error) {
2224                         que->res = NULL;
2225                         device_printf(dev, "Failed to register QUE handler");
2226                         return (error);
2227                 }
2228 #if __FreeBSD_version >= 800504
2229                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2230 #endif
2231                 que->msix = vector;
2232                 adapter->active_queues |= ((u64)1 << que->msix);
2233                 /*
2234                  * Bind the msix vector, and thus the
2235                  * rings to the corresponding cpu.
2236                  *
2237                  * This just happens to match the default RSS round-robin
2238                  * bucket -> queue -> CPU allocation.
2239                  */
2240                 if (adapter->num_queues > 1)
2241                         cpu_id = i;
2242
2243                 if (adapter->num_queues > 1)
2244                         bus_bind_intr(dev, que->res, cpu_id);
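                /*
                 * For example (illustrative): with num_queues == 4 this
                 * loop uses vectors 0-3 (IRQ rids 1-4), pins queue i's
                 * interrupt to CPU i, and the link vector is set up
                 * separately after the loop.
                 */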
2245
2246 #ifndef IXGBE_LEGACY_TX
2247                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2248 #endif
2249                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2250                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2251                     taskqueue_thread_enqueue, &que->tq);
2252                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2253                     device_get_nameunit(adapter->dev));
2254         }
2255
2256         /* and Link */
2257         rid = vector + 1;
2258         adapter->res = bus_alloc_resource_any(dev,
2259             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2260         if (!adapter->res) {
2261                 device_printf(dev,"Unable to allocate"
2262                     " bus resource: Link interrupt [%d]\n", rid);
2263                 return (ENXIO);
2264         }
2265         /* Set the link handler function */
2266         error = bus_setup_intr(dev, adapter->res,
2267             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2268             ixgbe_msix_link, adapter, &adapter->tag);
2269         if (error) {
2270                 adapter->res = NULL;
2271                 device_printf(dev, "Failed to register LINK handler");
2272                 return (error);
2273         }
2274 #if __FreeBSD_version >= 800504
2275         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2276 #endif
2277         adapter->vector = vector;
2278         /* Tasklets for Link, SFP and Multispeed Fiber */
2279         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2280         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2281         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2282         TASK_INIT(&adapter->phy_task, 0, ixgbe_handle_phy, adapter);
2283 #ifdef IXGBE_FDIR
2284         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2285 #endif
2286         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2287             taskqueue_thread_enqueue, &adapter->tq);
2288         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2289             device_get_nameunit(adapter->dev));
2290
2291         return (0);
2292 }
2293
2294 /*
2295  * Setup Either MSI/X or MSI
2296  */
2297 static int
2298 ixgbe_setup_msix(struct adapter *adapter)
2299 {
2300         device_t dev = adapter->dev;
2301         int rid, want, queues, msgs;
2302
2303         /* Override by tuneable */
2304         if (ixgbe_enable_msix == 0)
2305                 goto msi;
2306
2307         /* First try MSI/X */
2308         msgs = pci_msix_count(dev); 
2309         if (msgs == 0)
2310                 goto msi;
2311         rid = PCIR_BAR(MSIX_82598_BAR);
2312         adapter->msix_mem = bus_alloc_resource_any(dev,
2313             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2314         if (adapter->msix_mem == NULL) {
2315                 rid += 4;       /* 82599 maps in higher BAR */
2316                 adapter->msix_mem = bus_alloc_resource_any(dev,
2317                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
2318         }
2319         if (adapter->msix_mem == NULL) {
2320                 /* May not be enabled */
2321                 device_printf(adapter->dev,
2322                     "Unable to map MSIX table \n");
2323                 goto msi;
2324         }
2325
2326         /* Figure out a reasonable auto config value */
2327         queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2328
2329         if (ixgbe_num_queues != 0)
2330                 queues = ixgbe_num_queues;
2331         /* Set max queues to 8 when autoconfiguring */
2332         else if ((ixgbe_num_queues == 0) && (queues > 8))
2333                 queues = 8;
2334
2335         /* reflect correct sysctl value */
2336         ixgbe_num_queues = queues;
2337
2338         /*
2339         ** Want one vector (RX/TX pair) per queue
2340         ** plus an additional for Link.
2341         */
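        /*
        ** Worked example (illustrative): with 16 CPUs and 64 MSIX
        ** messages reported, the auto-config above picks 16 queues,
        ** caps them at 8, and so asks for want = 9 vectors (8 queue
        ** vectors plus the link vector).
        */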
2342         want = queues + 1;
2343         if (msgs >= want)
2344                 msgs = want;
2345         else {
2346                 device_printf(adapter->dev,
2347                     "MSIX Configuration Problem, "
2348                     "%d vectors but %d queues wanted!\n",
2349                     msgs, want);
2350                 goto msi;
2351         }
2352         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2353                 device_printf(adapter->dev,
2354                     "Using MSIX interrupts with %d vectors\n", msgs);
2355                 adapter->num_queues = queues;
2356                 return (msgs);
2357         }
2358         /*
2359         ** If MSIX alloc failed or provided us with
2360         ** less than needed, free and fall through to MSI
2361         */
2362         pci_release_msi(dev);
2363
2364 msi:
2365         if (adapter->msix_mem != NULL) {
2366                 bus_release_resource(dev, SYS_RES_MEMORY,
2367                     rid, adapter->msix_mem);
2368                 adapter->msix_mem = NULL;
2369         }
2370         msgs = 1;
2371         if (pci_alloc_msi(dev, &msgs) == 0) {
2372                 device_printf(adapter->dev,"Using an MSI interrupt\n");
2373                 return (msgs);
2374         }
2375         device_printf(adapter->dev,"Using a Legacy interrupt\n");
2376         return (0);
2377 }
2378
2379
2380 static int
2381 ixgbe_allocate_pci_resources(struct adapter *adapter)
2382 {
2383         int             rid;
2384         device_t        dev = adapter->dev;
2385
2386         rid = PCIR_BAR(0);
2387         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2388             &rid, RF_ACTIVE);
2389
2390         if (!(adapter->pci_mem)) {
2391                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2392                 return (ENXIO);
2393         }
2394
2395         adapter->osdep.mem_bus_space_tag =
2396                 rman_get_bustag(adapter->pci_mem);
2397         adapter->osdep.mem_bus_space_handle =
2398                 rman_get_bushandle(adapter->pci_mem);
2399         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2400
2401         /* Legacy defaults */
2402         adapter->num_queues = 1;
2403         adapter->hw.back = &adapter->osdep;
2404
2405         /*
2406         ** Now set up MSI or MSI/X; this should
2407         ** return the number of supported
2408         ** vectors (will be 1 for MSI).
2409         */
2410         adapter->msix = ixgbe_setup_msix(adapter);
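        /*
        ** Illustrative: a return of 0 means fall back to a shared
        ** legacy INTx, 1 means plain MSI, and n >= 2 means MSI/X
        ** with n - 1 queue vectors plus the link vector.
        */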
2411         return (0);
2412 }
2413
2414 static void
2415 ixgbe_free_pci_resources(struct adapter * adapter)
2416 {
2417         struct          ix_queue *que = adapter->queues;
2418         device_t        dev = adapter->dev;
2419         int             rid, memrid;
2420
2421         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2422                 memrid = PCIR_BAR(MSIX_82598_BAR);
2423         else
2424                 memrid = PCIR_BAR(MSIX_82599_BAR);
2425
2426         /*
2427         ** There is a slight possibility of a failure mode
2428         ** in attach that will result in entering this function
2429         ** before interrupt resources have been initialized, and
2430         ** in that case we do not want to execute the loops below.
2431         ** We can detect this reliably by the state of the adapter's
2432         ** res pointer.
2433         */
2434         if (adapter->res == NULL)
2435                 goto mem;
2436
2437         /*
2438         **  Release all msix queue resources:
2439         */
2440         for (int i = 0; i < adapter->num_queues; i++, que++) {
2441                 rid = que->msix + 1;
2442                 if (que->tag != NULL) {
2443                         bus_teardown_intr(dev, que->res, que->tag);
2444                         que->tag = NULL;
2445                 }
2446                 if (que->res != NULL)
2447                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2448         }
2449
2450
2451         /* Clean the Legacy or Link interrupt last */
2452         if (adapter->vector) /* we are doing MSIX */
2453                 rid = adapter->vector + 1;
2454         else
2455                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2456
2457         if (adapter->tag != NULL) {
2458                 bus_teardown_intr(dev, adapter->res, adapter->tag);
2459                 adapter->tag = NULL;
2460         }
2461         if (adapter->res != NULL)
2462                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2463
2464 mem:
2465         if (adapter->msix)
2466                 pci_release_msi(dev);
2467
2468         if (adapter->msix_mem != NULL)
2469                 bus_release_resource(dev, SYS_RES_MEMORY,
2470                     memrid, adapter->msix_mem);
2471
2472         if (adapter->pci_mem != NULL)
2473                 bus_release_resource(dev, SYS_RES_MEMORY,
2474                     PCIR_BAR(0), adapter->pci_mem);
2475
2476         return;
2477 }
2478
2479 /*********************************************************************
2480  *
2481  *  Setup networking device structure and register an interface.
2482  *
2483  **********************************************************************/
2484 static int
2485 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2486 {
2487         struct ifnet   *ifp;
2488
2489         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2490
2491         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2492         if (ifp == NULL) {
2493                 device_printf(dev, "can not allocate ifnet structure\n");
2494                 return (-1);
2495         }
2496         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2497         if_initbaudrate(ifp, IF_Gbps(10));
2498         ifp->if_init = ixgbe_init;
2499         ifp->if_softc = adapter;
2500         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2501         ifp->if_ioctl = ixgbe_ioctl;
2502         /* TSO parameters */
2503         ifp->if_hw_tsomax = 65518;
2504         ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
2505         ifp->if_hw_tsomaxsegsize = 2048;
2506 #ifndef IXGBE_LEGACY_TX
2507         ifp->if_transmit = ixgbe_mq_start;
2508         ifp->if_qflush = ixgbe_qflush;
2509 #else
2510         ifp->if_start = ixgbe_start;
2511         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2512         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2513         IFQ_SET_READY(&ifp->if_snd);
2514 #endif
2515
2516         ether_ifattach(ifp, adapter->hw.mac.addr);
2517
2518         adapter->max_frame_size =
2519             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2520
2521         /*
2522          * Tell the upper layer(s) we support long frames.
2523          */
2524         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2525
2526         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2527         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2528         ifp->if_capabilities |= IFCAP_LRO;
2529         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2530                              |  IFCAP_VLAN_HWTSO
2531                              |  IFCAP_VLAN_MTU
2532                              |  IFCAP_HWSTATS;
2533         ifp->if_capenable = ifp->if_capabilities;
2534
2535         /*
2536         ** Don't turn this on by default: if vlans are
2537         ** created on another pseudo device (e.g. lagg),
2538         ** then vlan events are not passed thru, breaking
2539         ** operation, but with HW FILTER off it works. If
2540         ** using vlans directly on the ixgbe driver you can
2541         ** enable this for full hardware tag filtering (see below).
2542         */
2543         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
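        /*
        ** Note (illustrative, assuming the stock ifconfig(8) capability
        ** name): because HWFILTER is advertised in if_capabilities but
        ** left out of if_capenable above, something like
        ** "ifconfig ix0 vlanhwfilter" can enable it at runtime when
        ** vlans are configured directly on the ix interface.
        */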
2544
2545         /*
2546          * Specify the media types supported by this adapter and register
2547          * callbacks to update media and link information
2548          */
2549         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2550                     ixgbe_media_status);
2551
2552         ixgbe_add_media_types(adapter);
2553
2554         /* Autoselect media by default */
2555         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2556
2557         return (0);
2558 }
2559
2560 static void
2561 ixgbe_add_media_types(struct adapter *adapter)
2562 {
2563         struct ixgbe_hw *hw = &adapter->hw;
2564         device_t dev = adapter->dev;
2565         int layer;
2566
2567         layer = ixgbe_get_supported_physical_layer(hw);
2568
2569         /* Media types with matching FreeBSD media defines */
2570         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2571                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2572         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2573                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2574         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2575                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2576         
2577         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2578             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2579                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2580
2581         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2582                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2583         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2584                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2585         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2586                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2587         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2588                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2589
2590         /*
2591         ** Other (no matching FreeBSD media type):
2592         ** To work around this, we'll assign these completely
2593         ** inappropriate media types.
2594         */
2595         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2596                 device_printf(dev, "Media supported: 10GbaseKR\n");
2597                 device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n");
2598                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2599         }
2600         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2601                 device_printf(dev, "Media supported: 10GbaseKX4\n");
2602                 device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n");
2603                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2604         }
2605         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2606                 device_printf(dev, "Media supported: 1000baseKX\n");
2607                 device_printf(dev, "1000baseKX mapped to 1000baseCX\n");
2608                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
2609         }
2610         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2611                 /* Someday, someone will care about you... */
2612                 device_printf(dev, "Media supported: 1000baseBX\n");
2613         }
2614         
2615         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2616                 ifmedia_add(&adapter->media,
2617                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2618                 ifmedia_add(&adapter->media,
2619                     IFM_ETHER | IFM_1000_T, 0, NULL);
2620         }
2621
2622         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2623 }
2624
2625 static void
2626 ixgbe_config_link(struct adapter *adapter)
2627 {
2628         struct ixgbe_hw *hw = &adapter->hw;
2629         u32     autoneg, err = 0;
2630         bool    sfp, negotiate;
2631
2632         sfp = ixgbe_is_sfp(hw);
2633
2634         if (sfp) { 
2635                 if (hw->phy.multispeed_fiber) {
2636                         hw->mac.ops.setup_sfp(hw);
2637                         ixgbe_enable_tx_laser(hw);
2638                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2639                 } else
2640                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2641         } else {
2642                 if (hw->mac.ops.check_link)
2643                         err = ixgbe_check_link(hw, &adapter->link_speed,
2644                             &adapter->link_up, FALSE);
2645                 if (err)
2646                         goto out;
2647                 autoneg = hw->phy.autoneg_advertised;
2648                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2649                         err  = hw->mac.ops.get_link_capabilities(hw,
2650                             &autoneg, &negotiate);
2651                 if (err)
2652                         goto out;
2653                 if (hw->mac.ops.setup_link)
2654                         err = hw->mac.ops.setup_link(hw,
2655                             autoneg, adapter->link_up);
2656         }
2657 out:
2658         return;
2659 }
2660
2661
2662 /*********************************************************************
2663  *
2664  *  Enable transmit units.
2665  *
2666  **********************************************************************/
2667 static void
2668 ixgbe_initialize_transmit_units(struct adapter *adapter)
2669 {
2670         struct tx_ring  *txr = adapter->tx_rings;
2671         struct ixgbe_hw *hw = &adapter->hw;
2672
2673         /* Setup the Base and Length of the Tx Descriptor Ring */
2674
2675         for (int i = 0; i < adapter->num_queues; i++, txr++) {
2676                 u64     tdba = txr->txdma.dma_paddr;
2677                 u32     txctrl = 0;
2678
2679                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2680                        (tdba & 0x00000000ffffffffULL));
2681                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2682                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2683                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2684
2685                 /* Setup the HW Tx Head and Tail descriptor pointers */
2686                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2687                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2688
2689                 /* Cache the tail address */
2690                 txr->tail = IXGBE_TDT(txr->me);
2691
2692                 /* Set the processing limit */
2693                 txr->process_limit = ixgbe_tx_process_limit;
2694
2695                 /* Disable Head Writeback */
2696                 switch (hw->mac.type) {
2697                 case ixgbe_mac_82598EB:
2698                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2699                         break;
2700                 case ixgbe_mac_82599EB:
2701                 case ixgbe_mac_X540:
2702                 default:
2703                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2704                         break;
2705                 }
2706                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2707                 switch (hw->mac.type) {
2708                 case ixgbe_mac_82598EB:
2709                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2710                         break;
2711                 case ixgbe_mac_82599EB:
2712                 case ixgbe_mac_X540:
2713                 default:
2714                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2715                         break;
2716                 }
2717
2718         }
2719
2720         if (hw->mac.type != ixgbe_mac_82598EB) {
2721                 u32 dmatxctl, rttdcs;
2722                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2723                 dmatxctl |= IXGBE_DMATXCTL_TE;
2724                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2725                 /* Disable arbiter to set MTQC */
2726                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2727                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2728                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2729                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2730                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2731                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2732         }
2733
2734         return;
2735 }
2736
2737 static void
2738 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2739 {
2740         struct ixgbe_hw *hw = &adapter->hw;
2741         uint32_t reta;
2742         int i, j, queue_id, table_size;
2743         int index_mult;
2744         uint32_t rss_key[10];
2745         uint32_t mrqc;
2746
2747         /* Setup RSS */
2748         reta = 0;
2749
2750         /* set up random bits */
2751         arc4rand(&rss_key, sizeof(rss_key), 0);
2752
2753         /* Set multiplier for RETA setup and table size based on MAC */
2754         index_mult = 0x1;
2755         table_size = 128;
2756         switch (adapter->hw.mac.type) {
2757         case ixgbe_mac_82598EB:
2758                 index_mult = 0x11;
2759                 break;
2760         case ixgbe_mac_X550:
2761         case ixgbe_mac_X550EM_x:
2762                 table_size = 512;
2763                 break;
2764         default:
2765                 break;
2766         }
2767
2768         /* Set up the redirection table */
2769         for (i = 0, j = 0; i < table_size; i++, j++) {
2770                 if (j == adapter->num_queues) j = 0;
2771                 queue_id = (j * index_mult);
2772                 /*
2773                  * The low 8 bits are for hash value (n+0);
2774                  * The next 8 bits are for hash value (n+1), etc.
2775                  */
2776                 reta = reta >> 8;
2777                 reta = reta | ( ((uint32_t) queue_id) << 24);
2778                 if ((i & 3) == 3) {
2779                         if (i < 128)
2780                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2781                         else
2782                                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta);
2783                         reta = 0;
2784                 }
2785         }
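        /*
         * Worked example (illustrative): with 4 queues on a non-82598
         * MAC (index_mult 1) the buckets cycle 0,1,2,3,..., so every
         * RETA dword written above is 0x03020100 -- entry n sits in
         * byte n, lowest byte first.
         */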
2786
2787         /* Now fill our hash function seeds */
2788         for (int i = 0; i < 10; i++)
2789                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2790
2791         /* Perform hash on these packet types */
2792         /*
2793          * Disable UDP - IP fragments aren't currently being handled
2794          * and so we end up with a mix of 2-tuple and 4-tuple
2795          * traffic.
2796          */
2797         mrqc = IXGBE_MRQC_RSSEN
2798              | IXGBE_MRQC_RSS_FIELD_IPV4
2799              | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2800 #if 0
2801              | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2802 #endif
2803              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2804              | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2805              | IXGBE_MRQC_RSS_FIELD_IPV6
2806              | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2807 #if 0
2808              | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2809              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2810 #endif
2811         ;
2812         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2813 }
2814
2815
2816 /*********************************************************************
2817  *
2818  *  Setup receive registers and features.
2819  *
2820  **********************************************************************/
2821 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2822
2823 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2824         
2825 static void
2826 ixgbe_initialize_receive_units(struct adapter *adapter)
2827 {
2828         struct  rx_ring *rxr = adapter->rx_rings;
2829         struct ixgbe_hw *hw = &adapter->hw;
2830         struct ifnet   *ifp = adapter->ifp;
2831         u32             bufsz, fctrl, srrctl, rxcsum;
2832         u32             hlreg;
2833
2834
2835         /*
2836          * Make sure receives are disabled while
2837          * setting up the descriptor ring
2838          */
2839         ixgbe_disable_rx(hw);
2840
2841         /* Enable broadcasts */
2842         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2843         fctrl |= IXGBE_FCTRL_BAM;
2844         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2845                 fctrl |= IXGBE_FCTRL_DPF;
2846                 fctrl |= IXGBE_FCTRL_PMCF;
2847         }
2848         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2849
2850         /* Set for Jumbo Frames? */
2851         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2852         if (ifp->if_mtu > ETHERMTU)
2853                 hlreg |= IXGBE_HLREG0_JUMBOEN;
2854         else
2855                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2856 #ifdef DEV_NETMAP
2857         /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2858         if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2859                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2860         else
2861                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2862 #endif /* DEV_NETMAP */
2863         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2864
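             /*
              * Round the receive buffer size up to the SRRCTL packet buffer
              * granularity (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT bytes); this
              * value is programmed into each ring's SRRCTL below.
              */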
2865         bufsz = (adapter->rx_mbuf_sz +
2866             BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2867
2868         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2869                 u64 rdba = rxr->rxdma.dma_paddr;
2870
2871                 /* Setup the Base and Length of the Rx Descriptor Ring */
2872                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2873                                (rdba & 0x00000000ffffffffULL));
2874                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2875                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2876                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2877
2878                 /* Set up the SRRCTL register */
2879                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2880                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2881                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2882                 srrctl |= bufsz;
2883                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2884
2885                 /*
2886                  * Set DROP_EN iff we have no flow control and >1 queue.
2887                  * Note that srrctl was cleared shortly before during reset,
2888                  * so we do not need to clear the bit, but do it just in case
2889                  * this code is moved elsewhere.
2890                  */
2891                 if (adapter->num_queues > 1 &&
2892                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2893                         srrctl |= IXGBE_SRRCTL_DROP_EN;
2894                 } else {
2895                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2896                 }
2897
2898                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2899
2900                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2901                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2902                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2903
2904                 /* Set the processing limit */
2905                 rxr->process_limit = ixgbe_rx_process_limit;
2906
2907                 /* Set the driver rx tail address */
2908                 rxr->tail =  IXGBE_RDT(rxr->me);
2909         }
2910
2911         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2912                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2913                               IXGBE_PSRTYPE_UDPHDR |
2914                               IXGBE_PSRTYPE_IPV4HDR |
2915                               IXGBE_PSRTYPE_IPV6HDR;
2916                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2917         }
2918
2919         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2920
2921         ixgbe_initialise_rss_mapping(adapter);
2922
2923         if (adapter->num_queues > 1) {
2924                 /* RSS and RX IPP Checksum are mutually exclusive */
2925                 rxcsum |= IXGBE_RXCSUM_PCSD;
2926         }
2927
2928         if (ifp->if_capenable & IFCAP_RXCSUM)
2929                 rxcsum |= IXGBE_RXCSUM_PCSD;
2930
2931         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2932                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2933
2934         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2935
2936         return;
2937 }
2938
2939
2940 /*
2941 ** This routine is run via a vlan config EVENT;
2942 ** it enables us to use the HW Filter table since
2943 ** we can get the vlan id. This just creates the
2944 ** entry in the soft version of the VFTA; init will
2945 ** repopulate the real table.
2946 */
2947 static void
2948 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2949 {
2950         struct adapter  *adapter = ifp->if_softc;
2951         u16             index, bit;
2952
2953         if (ifp->if_softc !=  arg)   /* Not our event */
2954                 return;
2955
2956         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2957                 return;
2958
2959         IXGBE_CORE_LOCK(adapter);
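             /*
              * The shadow VFTA mirrors the hardware table: 128 32-bit words,
              * with VLAN ID bits [11:5] selecting the word and bits [4:0]
              * the bit within it.
              */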
2960         index = (vtag >> 5) & 0x7F;
2961         bit = vtag & 0x1F;
2962         adapter->shadow_vfta[index] |= (1 << bit);
2963         ++adapter->num_vlans;
2964         ixgbe_setup_vlan_hw_support(adapter);
2965         IXGBE_CORE_UNLOCK(adapter);
2966 }
2967
2968 /*
2969 ** This routine is run via a vlan
2970 ** unconfig EVENT; it removes our entry
2971 ** in the soft VFTA.
2972 */
2973 static void
2974 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2975 {
2976         struct adapter  *adapter = ifp->if_softc;
2977         u16             index, bit;
2978
2979         if (ifp->if_softc !=  arg)
2980                 return;
2981
2982         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2983                 return;
2984
2985         IXGBE_CORE_LOCK(adapter);
2986         index = (vtag >> 5) & 0x7F;
2987         bit = vtag & 0x1F;
2988         adapter->shadow_vfta[index] &= ~(1 << bit);
2989         --adapter->num_vlans;
2990         /* Re-init to load the changes */
2991         ixgbe_setup_vlan_hw_support(adapter);
2992         IXGBE_CORE_UNLOCK(adapter);
2993 }
2994
2995 static void
2996 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2997 {
2998         struct ifnet    *ifp = adapter->ifp;
2999         struct ixgbe_hw *hw = &adapter->hw;
3000         struct rx_ring  *rxr;
3001         u32             ctrl;
3002
3003
3004         /*
3005         ** We get here through init_locked, meaning
3006         ** a soft reset; this has already cleared
3007         ** the VFTA and other state, so if no
3008         ** vlans have been registered, do nothing.
3009         */
3010         if (adapter->num_vlans == 0)
3011                 return;
3012
3013         /* Setup the queues for vlans */
3014         for (int i = 0; i < adapter->num_queues; i++) {
3015                 rxr = &adapter->rx_rings[i];
3016                 /* On 82599 and later the VLAN enable is per-queue in RXDCTL */
3017                 if (hw->mac.type != ixgbe_mac_82598EB) {
3018                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3019                         ctrl |= IXGBE_RXDCTL_VME;
3020                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3021                 }
3022                 rxr->vtag_strip = TRUE;
3023         }
3024
3025         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3026                 return;
3027         /*
3028         ** A soft reset zeroes out the VFTA, so
3029         ** we need to repopulate it now.
3030         */
3031         for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3032                 if (adapter->shadow_vfta[i] != 0)
3033                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3034                             adapter->shadow_vfta[i]);
3035
3036         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3037         /* Enable the Filter Table if enabled */
3038         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3039                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3040                 ctrl |= IXGBE_VLNCTRL_VFE;
3041         }
3042         if (hw->mac.type == ixgbe_mac_82598EB)
3043                 ctrl |= IXGBE_VLNCTRL_VME;
3044         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3045 }
3046
3047 static void
3048 ixgbe_enable_intr(struct adapter *adapter)
3049 {
3050         struct ixgbe_hw *hw = &adapter->hw;
3051         struct ix_queue *que = adapter->queues;
3052         u32             mask, fwsm;
3053
3054         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3055         /* Enable Fan Failure detection */
3056         if (hw->device_id == IXGBE_DEV_ID_82598AT)
3057                 mask |= IXGBE_EIMS_GPI_SDP1;
3058
3059         switch (adapter->hw.mac.type) {
3060                 case ixgbe_mac_82599EB:
3061                         mask |= IXGBE_EIMS_ECC;
3062                         /* Temperature sensor on some adapters */
3063                         mask |= IXGBE_EIMS_GPI_SDP0;
3064                         /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3065                         mask |= IXGBE_EIMS_GPI_SDP1;
3066                         mask |= IXGBE_EIMS_GPI_SDP2;
3067 #ifdef IXGBE_FDIR
3068                         mask |= IXGBE_EIMS_FLOW_DIR;
3069 #endif
3070                         break;
3071                 case ixgbe_mac_X540:
3072                         /* Detect if Thermal Sensor is enabled */
3073                         fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3074                         if (fwsm & IXGBE_FWSM_TS_ENABLED)
3075                                 mask |= IXGBE_EIMS_TS;
3076                         mask |= IXGBE_EIMS_ECC;
3077 #ifdef IXGBE_FDIR
3078                         mask |= IXGBE_EIMS_FLOW_DIR;
3079 #endif
3080                         break;
3081                 case ixgbe_mac_X550:
3082                 case ixgbe_mac_X550EM_x:
3083                         /* MAC thermal sensor is automatically enabled */
3084                         mask |= IXGBE_EIMS_TS;
3085                         /* Some devices use SDP0 for important information */
3086                         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3087                             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3088                                 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3089                         mask |= IXGBE_EIMS_ECC;
3090 #ifdef IXGBE_FDIR
3091                         mask |= IXGBE_EIMS_FLOW_DIR;
3092 #endif
3093                 /* falls through */
3094                 default:
3095                         break;
3096         }
3097
3098         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3099
3100         /* With MSI-X we use auto clear */
3101         if (adapter->msix_mem) {
3102                 mask = IXGBE_EIMS_ENABLE_MASK;
3103                 /* Don't autoclear Link */
3104                 mask &= ~IXGBE_EIMS_OTHER;
3105                 mask &= ~IXGBE_EIMS_LSC;
3106                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3107         }
3108
3109         /*
3110         ** Now enable all queues; this is done separately to
3111         ** allow for handling the extended (beyond 32) MSIX
3112         ** vectors that can be used by 82599
3113         */
3114         for (int i = 0; i < adapter->num_queues; i++, que++)
3115                 ixgbe_enable_queue(adapter, que->msix);
3116
3117         IXGBE_WRITE_FLUSH(hw);
3118
3119         return;
3120 }
3121
3122 static void
3123 ixgbe_disable_intr(struct adapter *adapter)
3124 {
3125         if (adapter->msix_mem)
3126                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3127         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3128                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3129         } else {
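                     /*
                      * 82599 and later: mask the upper (non-queue) cause bits
                      * in EIMC and every extended queue vector via EIMC_EX.
                      */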
3130                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3131                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3132                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3133         }
3134         IXGBE_WRITE_FLUSH(&adapter->hw);
3135         return;
3136 }
3137
3138 /*
3139 ** Get the width and transaction speed of
3140 ** the slot this adapter is plugged into.
3141 */
3142 static void
3143 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3144 {
3145         device_t                dev = ((struct ixgbe_osdep *)hw->back)->dev;
3146         struct ixgbe_mac_info   *mac = &hw->mac;
3147         u16                     link;
3148         u32                     offset;
3149
3150         /* For most devices simply call the shared code routine */
3151         if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3152                 ixgbe_get_bus_info(hw);
3153                 /* These devices don't use PCI-E */
3154                 switch (hw->mac.type) {
3155                 case ixgbe_mac_X550EM_x:
3156                         return;
3157                 default:
3158                         goto display;
3159                 }
3160         }
3161
3162         /*
3163         ** For the Quad port adapter we need to parse back
3164         ** up the PCI tree to find the speed of the expansion
3165         ** slot into which this adapter is plugged. A bit more work.
3166         */
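             /*
              * Each device_get_parent() pair climbs one level of the tree:
              * from a device to its pci bus, then to the bridge above it.
              */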
3167         dev = device_get_parent(device_get_parent(dev));
3168 #ifdef IXGBE_DEBUG
3169         device_printf(dev, "parent pcib = %x,%x,%x\n",
3170             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3171 #endif
3172         dev = device_get_parent(device_get_parent(dev));
3173 #ifdef IXGBE_DEBUG
3174         device_printf(dev, "slot pcib = %x,%x,%x\n",
3175             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3176 #endif
3177         /* Now get the PCI Express Capabilities offset */
3178         pci_find_cap(dev, PCIY_EXPRESS, &offset);
3179         /* ...and read the Link Status Register */
3180         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3181         switch (link & IXGBE_PCI_LINK_WIDTH) {
3182         case IXGBE_PCI_LINK_WIDTH_1:
3183                 hw->bus.width = ixgbe_bus_width_pcie_x1;
3184                 break;
3185         case IXGBE_PCI_LINK_WIDTH_2:
3186                 hw->bus.width = ixgbe_bus_width_pcie_x2;
3187                 break;
3188         case IXGBE_PCI_LINK_WIDTH_4:
3189                 hw->bus.width = ixgbe_bus_width_pcie_x4;
3190                 break;
3191         case IXGBE_PCI_LINK_WIDTH_8:
3192                 hw->bus.width = ixgbe_bus_width_pcie_x8;
3193                 break;
3194         default:
3195                 hw->bus.width = ixgbe_bus_width_unknown;
3196                 break;
3197         }
3198
3199         switch (link & IXGBE_PCI_LINK_SPEED) {
3200         case IXGBE_PCI_LINK_SPEED_2500:
3201                 hw->bus.speed = ixgbe_bus_speed_2500;
3202                 break;
3203         case IXGBE_PCI_LINK_SPEED_5000:
3204                 hw->bus.speed = ixgbe_bus_speed_5000;
3205                 break;
3206         case IXGBE_PCI_LINK_SPEED_8000:
3207                 hw->bus.speed = ixgbe_bus_speed_8000;
3208                 break;
3209         default:
3210                 hw->bus.speed = ixgbe_bus_speed_unknown;
3211                 break;
3212         }
3213
3214         mac->ops.set_lan_id(hw);
3215
3216 display:
3217         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3218             ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3219             (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3220             (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3221             (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3222             (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3223             (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3224             ("Unknown"));
3225
3226         if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3227             ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3228             (hw->bus.speed == ixgbe_bus_speed_2500))) {
3229                 device_printf(dev, "PCI-Express bandwidth available"
3230                     " for this card\n     is not sufficient for"
3231                     " optimal performance.\n");
3232                 device_printf(dev, "For optimal performance a x8 "
3233                     "PCIE, or x4 PCIE Gen2 slot is required.\n");
3234         }
3235         if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3236             ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3237             (hw->bus.speed < ixgbe_bus_speed_8000))) {
3238                 device_printf(dev, "PCI-Express bandwidth available"
3239                     " for this card\n     is not sufficient for"
3240                     " optimal performance.\n");
3241                 device_printf(dev, "For optimal performance a x8 "
3242                     "PCIE Gen3 slot is required.\n");
3243         }
3244
3245         return;
3246 }
3247
3248
3249 /*
3250 ** Setup the correct IVAR register for a particular MSIX interrupt
3251 **   (yes this is all very magic and confusing :)
3252 **  - entry is the register array entry
3253 **  - vector is the MSIX vector for this queue
3254 **  - type is RX/TX/MISC
3255 */
3256 static void
3257 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3258 {
3259         struct ixgbe_hw *hw = &adapter->hw;
3260         u32 ivar, index;
3261
3262         vector |= IXGBE_IVAR_ALLOC_VAL;
3263
3264         switch (hw->mac.type) {
3265
3266         case ixgbe_mac_82598EB:
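                     /*
                      * 82598 uses a flat table: RX causes occupy entries 0-63
                      * and TX causes 64-127, four 8-bit entries per IVAR
                      * register.
                      */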
3267                 if (type == -1)
3268                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3269                 else
3270                         entry += (type * 64);
3271                 index = (entry >> 2) & 0x1F;
3272                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3273                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3274                 ivar |= (vector << (8 * (entry & 0x3)));
3275                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3276                 break;
3277
3278         case ixgbe_mac_82599EB:
3279         case ixgbe_mac_X540:
3280         case ixgbe_mac_X550:
3281         case ixgbe_mac_X550EM_x:
3282                 if (type == -1) { /* MISC IVAR */
3283                         index = (entry & 1) * 8;
3284                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3285                         ivar &= ~(0xFF << index);
3286                         ivar |= (vector << index);
3287                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3288                 } else {        /* RX/TX IVARS */
3289                         index = (16 * (entry & 1)) + (8 * type);
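                             /*
                              * e.g. queue 5: the RX cause (type 0) lands in
                              * byte 2 of IVAR(2), the TX cause (type 1) in
                              * byte 3.
                              */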
3290                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3291                         ivar &= ~(0xFF << index);
3292                         ivar |= (vector << index);
3293                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3294                 }
3295                 break;
3296         default:
3297                 break;
3298         }
3299 }
3300
3301 static void
3302 ixgbe_configure_ivars(struct adapter *adapter)
3303 {
3304         struct  ix_queue *que = adapter->queues;
3305         u32 newitr;
3306
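             /*
              * The cap is encoded into the EITR interval field (bits 11:3);
              * e.g. a limit of 8000 interrupts/sec becomes 4000000 / 8000 =
              * 500, masked down to 0x1f0.
              */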
3307         if (ixgbe_max_interrupt_rate > 0)
3308                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3309         else {
3310                 /*
3311                 ** Disable DMA coalescing if interrupt moderation is
3312                 ** disabled.
3313                 */
3314                 adapter->dmac = 0;
3315                 newitr = 0;
3316         }
3317
3318         for (int i = 0; i < adapter->num_queues; i++, que++) {
3319                 /* First the RX queue entry */
3320                 ixgbe_set_ivar(adapter, i, que->msix, 0);
3321                 /* ... and the TX */
3322                 ixgbe_set_ivar(adapter, i, que->msix, 1);
3323                 /* Set an Initial EITR value */
3324                 IXGBE_WRITE_REG(&adapter->hw,
3325                     IXGBE_EITR(que->msix), newitr);
3326         }
3327
3328         /* For the Link interrupt */
3329         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3330 }
3331
3332 /*
3333 ** ixgbe_sfp_probe - called in the local timer to
3334 ** determine if a port had optics inserted.
3335 */  
3336 static bool ixgbe_sfp_probe(struct adapter *adapter)
3337 {
3338         struct ixgbe_hw *hw = &adapter->hw;
3339         device_t        dev = adapter->dev;
3340         bool            result = FALSE;
3341
3342         if ((hw->phy.type == ixgbe_phy_nl) &&
3343             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3344                 s32 ret = hw->phy.ops.identify_sfp(hw);
3345                 if (ret)
3346                         goto out;
3347                 ret = hw->phy.ops.reset(hw);
3348                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3349                         device_printf(dev,"Unsupported SFP+ module detected!");
3350                         printf(" Reload driver with supported module.\n");
3351                         adapter->sfp_probe = FALSE;
3352                         goto out;
3353                 } else
3354                         device_printf(dev,"SFP+ module detected!\n");
3355                 /* We now have supported optics */
3356                 adapter->sfp_probe = FALSE;
3357                 /* Set the optics type so system reports correctly */
3358                 ixgbe_setup_optics(adapter);
3359                 result = TRUE;
3360         }
3361 out:
3362         return (result);
3363 }
3364
3365 /*
3366 ** Tasklet handler for MSIX Link interrupts
3367 **  - do outside interrupt since it might sleep
3368 */
3369 static void
3370 ixgbe_handle_link(void *context, int pending)
3371 {
3372         struct adapter  *adapter = context;
3373
3374         ixgbe_check_link(&adapter->hw,
3375             &adapter->link_speed, &adapter->link_up, 0);
3376         ixgbe_update_link_status(adapter);
3377 }
3378
3379 /*
3380 ** Tasklet for handling SFP module interrupts
3381 */
3382 static void
3383 ixgbe_handle_mod(void *context, int pending)
3384 {
3385         struct adapter  *adapter = context;
3386         struct ixgbe_hw *hw = &adapter->hw;
3387         device_t        dev = adapter->dev;
3388         u32 err;
3389
3390         err = hw->phy.ops.identify_sfp(hw);
3391         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3392                 device_printf(dev,
3393                     "Unsupported SFP+ module type was detected.\n");
3394                 return;
3395         }
3396         err = hw->mac.ops.setup_sfp(hw);
3397         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3398                 device_printf(dev,
3399                     "Setup failure - unsupported SFP+ module type.\n");
3400                 return;
3401         }
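             /* Hand off to the MSF task to renegotiate link for the new module. */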
3402         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3403         return;
3404 }
3405
3406
3407 /*
3408 ** Tasklet for handling MSF (multispeed fiber) interrupts
3409 */
3410 static void
3411 ixgbe_handle_msf(void *context, int pending)
3412 {
3413         struct adapter  *adapter = context;
3414         struct ixgbe_hw *hw = &adapter->hw;
3415         u32 autoneg;
3416         bool negotiate;
3417         int err;
3418
3419         err = hw->phy.ops.identify_sfp(hw);
3420         if (!err) {
3421                 ixgbe_setup_optics(adapter);
3422                 INIT_DEBUGOUT1("ixgbe_handle_msf: flags: %X\n", adapter->optics);
3423         }
3424
3425         autoneg = hw->phy.autoneg_advertised;
3426         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3427                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3428         if (hw->mac.ops.setup_link)
3429                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3430
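             /*
              * Rebuild the advertised media list, since the supported speeds
              * may have changed with the newly identified module.
              */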
3431         ifmedia_removeall(&adapter->media);
3432         ixgbe_add_media_types(adapter);
3433         return;
3434 }
3435
3436 /*
3437 ** Tasklet for handling interrupts from an external PHY
3438 */
3439 static void
3440 ixgbe_handle_phy(void *context, int pending)
3441 {
3442         struct adapter  *adapter = context;
3443         struct ixgbe_hw *hw = &adapter->hw;
3444         int error;
3445
3446         error = hw->phy.ops.handle_lasi(hw);
3447         if (error == IXGBE_ERR_OVERTEMP)
3448                 device_printf(adapter->dev,
3449                     "CRITICAL: EXTERNAL PHY OVER TEMP!! "
3450                     "PHY will downshift to lower power state!\n");
3451         else if (error)
3452                 device_printf(adapter->dev,
3453                     "Error handling LASI interrupt: %d\n",
3454                     error);
3455         return;
3456 }
3457
3458 #ifdef IXGBE_FDIR
3459 /*
3460 ** Tasklet for reinitializing the Flow Director filter table
3461 */
3462 static void
3463 ixgbe_reinit_fdir(void *context, int pending)
3464 {
3465         struct adapter  *adapter = context;
3466         struct ifnet   *ifp = adapter->ifp;
3467
3468         if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3469                 return;
3470         ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3471         adapter->fdir_reinit = 0;
3472         /* re-enable flow director interrupts */
3473         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3474         /* Restart the interface */
3475         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3476         return;
3477 }
3478 #endif
3479
3480 /*********************************************************************
3481  *
3482  *  Configure DMA Coalescing
3483  *
3484  **********************************************************************/
3485 static void
3486 ixgbe_config_dmac(struct adapter *adapter)
3487 {
3488         struct ixgbe_hw *hw = &adapter->hw;
3489         struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
3490
3491         if (hw->mac.type < ixgbe_mac_X550 ||
3492             !hw->mac.ops.dmac_config)
3493                 return;
3494
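             /*
              * The '^' expressions below are simply "has it changed?" tests
              * against the cached config; DMA coalescing is reprogrammed only
              * when the watchdog timer or link speed differs.
              */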
3495         if (dcfg->watchdog_timer ^ adapter->dmac ||
3496             dcfg->link_speed ^ adapter->link_speed) {
3497                 dcfg->watchdog_timer = adapter->dmac;
3498                 dcfg->fcoe_en = false;
3499                 dcfg->link_speed = adapter->link_speed;
3500                 dcfg->num_tcs = 1;
3501                 
3502                 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
3503                     dcfg->watchdog_timer, dcfg->link_speed);
3504
3505                 hw->mac.ops.dmac_config(hw);
3506         }
3507 }
3508
3509 /*
3510  * Checks whether the adapter supports Energy Efficient Ethernet
3511  * or not, based on device ID.
3512  */
3513 static void
3514 ixgbe_check_eee_support(struct adapter *adapter)
3515 {
3516         struct ixgbe_hw *hw = &adapter->hw;
3517
3518         adapter->eee_support = adapter->eee_enabled =
3519             (hw->device_id == IXGBE_DEV_ID_X550T ||
3520                 hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
3521 }
3522
3523 /*
3524  * Checks whether the adapter's ports are capable of
3525  * Wake On LAN by reading the adapter's NVM.
3526  *
3527  * Sets each port's hw->wol_enabled value depending
3528  * on the value read here.
3529  */
3530 static void
3531 ixgbe_check_wol_support(struct adapter *adapter)
3532 {
3533         struct ixgbe_hw *hw = &adapter->hw;
3534         u16 dev_caps = 0;
3535
3536         /* Find out WoL support for port */
3537         adapter->wol_support = hw->wol_enabled = 0;
3538         ixgbe_get_device_caps(hw, &dev_caps);
3539         if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
3540             ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
3541                 hw->bus.func == 0))
3542             adapter->wol_support = hw->wol_enabled = 1;
3543
3544         /* Save initial wake up filter configuration */
3545         adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
3546
3547         return;
3548 }
3549
3550 /*
3551  * Prepare the adapter/port for LPLU and/or WoL
3552  */
3553 static int
3554 ixgbe_setup_low_power_mode(struct adapter *adapter)
3555 {
3556         struct ixgbe_hw *hw = &adapter->hw;
3557         device_t dev = adapter->dev;
3558         s32 error = 0;
3559
3560         mtx_assert(&adapter->core_mtx, MA_OWNED);
3561
3562         /* Limit power management flow to X550EM baseT */
3563         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T
3564             && hw->phy.ops.enter_lplu) {
3565                 /* Turn off support for APM wakeup. (Using ACPI instead) */
3566                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
3567                     IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2);
3568
3569                 /*
3570                  * Clear Wake Up Status register to prevent any previous wakeup
3571                  * events from waking us up immediately after we suspend.
3572                  */
3573                 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3574
3575                 /*
3576                  * Program the Wakeup Filter Control register with user filter
3577                  * settings
3578                  */
3579                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3580
3581                 /* Enable wakeups and power management in Wakeup Control */
3582                 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3583                     IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3584
3585                 /* X550EM baseT adapters need a special LPLU flow */
3586                 hw->phy.reset_disable = true;
3587                 ixgbe_stop(adapter);
3588                 error = hw->phy.ops.enter_lplu(hw);
3589                 if (error)
3590                         device_printf(dev,
3591                             "Error entering LPLU: %d\n", error);
3592                 hw->phy.reset_disable = false;
3593         } else {
3594                 /* Just stop for other adapters */
3595                 ixgbe_stop(adapter);
3596         }
3597
3598         return error;
3599 }
3600
3601 /**********************************************************************
3602  *
3603  *  Update the board statistics counters.
3604  *
3605  **********************************************************************/
3606 static void
3607 ixgbe_update_stats_counters(struct adapter *adapter)
3608 {
3609         struct ixgbe_hw *hw = &adapter->hw;
3610         u32 missed_rx = 0, bprc, lxon, lxoff, total;
3611         u64 total_missed_rx = 0;
3612
3613         adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3614         adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3615         adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3616         adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3617
3618         for (int i = 0; i < 16; i++) {
3619                 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3620                 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3621                 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3622         }
3623         adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3624         adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3625         adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3626
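             /*
              * Note: missed_rx and total_missed_rx are never accumulated in
              * this function, so the gprc adjustment below and the iqdrops
              * total reported to the stack remain zero.
              */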
3627         /* Hardware workaround, gprc counts missed packets */
3628         adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3629         adapter->stats.pf.gprc -= missed_rx;
3630
3631         if (hw->mac.type != ixgbe_mac_82598EB) {
3632                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3633                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3634                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3635                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3636                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3637                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3638                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3639                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3640         } else {
3641                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3642                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3643                 /* 82598 only has a counter in the high register */
3644                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3645                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3646                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3647         }
3648
3649         /*
3650          * Workaround: mprc hardware is incorrectly counting
3651          * broadcasts, so for now we subtract those.
3652          */
3653         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3654         adapter->stats.pf.bprc += bprc;
3655         adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3656         if (hw->mac.type == ixgbe_mac_82598EB)
3657                 adapter->stats.pf.mprc -= bprc;
3658
3659         adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3660         adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3661         adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3662         adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3663         adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3664         adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3665
3666         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3667         adapter->stats.pf.lxontxc += lxon;
3668         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3669         adapter->stats.pf.lxofftxc += lxoff;
3670         total = lxon + lxoff;
3671
3672         adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3673         adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3674         adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3675         adapter->stats.pf.gptc -= total;
3676         adapter->stats.pf.mptc -= total;
3677         adapter->stats.pf.ptc64 -= total;
3678         adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3679
3680         adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3681         adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3682         adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3683         adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3684         adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3685         adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3686         adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3687         adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3688         adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3689         adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3690         adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3691         adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3692         adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3693         adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3694         adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3695         adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3696         adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3697         adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3698         /* Only read FCOE counters on 82599 and later */
3699         if (hw->mac.type != ixgbe_mac_82598EB) {
3700                 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3701                 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3702                 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3703                 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3704                 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3705         }
3706
3707         /* Fill out the OS statistics structure */
3708         IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3709         IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3710         IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3711         IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3712         IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3713         IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3714         IXGBE_SET_COLLISIONS(adapter, 0);
3715         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3716         IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3717             + adapter->stats.pf.rlec);
3718 }
3719
3720 #if __FreeBSD_version >= 1100036
3721 static uint64_t
3722 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3723 {
3724         struct adapter *adapter;
3725         struct tx_ring *txr;
3726         uint64_t rv;
3727
3728         adapter = if_getsoftc(ifp);
3729
3730         switch (cnt) {
3731         case IFCOUNTER_IPACKETS:
3732                 return (adapter->ipackets);
3733         case IFCOUNTER_OPACKETS:
3734                 return (adapter->opackets);
3735         case IFCOUNTER_IBYTES:
3736                 return (adapter->ibytes);
3737         case IFCOUNTER_OBYTES:
3738                 return (adapter->obytes);
3739         case IFCOUNTER_IMCASTS:
3740                 return (adapter->imcasts);
3741         case IFCOUNTER_OMCASTS:
3742                 return (adapter->omcasts);
3743         case IFCOUNTER_COLLISIONS:
3744                 return (0);
3745         case IFCOUNTER_IQDROPS:
3746                 return (adapter->iqdrops);
3747         case IFCOUNTER_OQDROPS:
3748                 rv = 0;
3749                 txr = adapter->tx_rings;
3750                 for (int i = 0; i < adapter->num_queues; i++, txr++)
3751                         rv += txr->br->br_drops;
3752                 return (rv);
3753         case IFCOUNTER_IERRORS:
3754                 return (adapter->ierrors);
3755         default:
3756                 return (if_get_counter_default(ifp, cnt));
3757         }
3758 }
3759 #endif
3760
3761 /** ixgbe_sysctl_tdh_handler - Handler function
3762  *  Retrieves the TDH value from the hardware
3763  */
3764 static int 
3765 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3766 {
3767         int error;
3768
3769         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3770         if (!txr) return 0;
3771
3772         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3773         error = sysctl_handle_int(oidp, &val, 0, req);
3774         if (error || !req->newptr)
3775                 return error;
3776         return 0;
3777 }
3778
3779 /** ixgbe_sysctl_tdt_handler - Handler function
3780  *  Retrieves the TDT value from the hardware
3781  */
3782 static int 
3783 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3784 {
3785         int error;
3786
3787         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3788         if (!txr) return 0;
3789
3790         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3791         error = sysctl_handle_int(oidp, &val, 0, req);
3792         if (error || !req->newptr)
3793                 return error;
3794         return 0;
3795 }
3796
3797 /** ixgbe_sysctl_rdh_handler - Handler function
3798  *  Retrieves the RDH value from the hardware
3799  */
3800 static int 
3801 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3802 {
3803         int error;
3804
3805         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3806         if (!rxr) return 0;
3807
3808         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3809         error = sysctl_handle_int(oidp, &val, 0, req);
3810         if (error || !req->newptr)
3811                 return error;
3812         return 0;
3813 }
3814
3815 /** ixgbe_sysctl_rdt_handler - Handler function
3816  *  Retrieves the RDT value from the hardware
3817  */
3818 static int 
3819 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3820 {
3821         int error;
3822
3823         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3824         if (!rxr) return 0;
3825
3826         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3827         error = sysctl_handle_int(oidp, &val, 0, req);
3828         if (error || !req->newptr)
3829                 return error;
3830         return 0;
3831 }
3832
3833 static int
3834 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3835 {
3836         int error;
3837         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3838         unsigned int reg, usec, rate;
3839
3840         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3841         usec = ((reg & 0x0FF8) >> 3);
3842         if (usec > 0)
3843                 rate = 500000 / usec;
3844         else
3845                 rate = 0;
3846         error = sysctl_handle_int(oidp, &rate, 0, req);
3847         if (error || !req->newptr)
3848                 return error;
3849         reg &= ~0xfff; /* default, no limitation */
3850         ixgbe_max_interrupt_rate = 0;
3851         if (rate > 0 && rate < 500000) {
3852                 if (rate < 1000)
3853                         rate = 1000;
3854                 ixgbe_max_interrupt_rate = rate;
3855                 reg |= ((4000000/rate) & 0xff8 );
3856         }
3857         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3858         return 0;
3859 }
3860
3861 static void
3862 ixgbe_add_device_sysctls(struct adapter *adapter)
3863 {
3864         device_t dev = adapter->dev;
3865         struct ixgbe_hw *hw = &adapter->hw;
3866         struct sysctl_oid_list *child;
3867         struct sysctl_ctx_list *ctx;
3868
3869         ctx = device_get_sysctl_ctx(dev);
3870         child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
3871
3872         /* Sysctls for all devices */
3873         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "fc",
3874                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3875                         ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
3876
3877         SYSCTL_ADD_INT(ctx, child, OID_AUTO, "enable_aim",
3878                         CTLFLAG_RW,
3879                         &ixgbe_enable_aim, 1, "Interrupt Moderation");
3880
3881         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "advertise_speed",
3882                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3883                         ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
3884
3885         SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "thermal_test",
3886                         CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3887                         ixgbe_sysctl_thermal_test, "I", "Thermal Test");
3888
3889         /* for X550 devices */
3890         if (hw->mac.type >= ixgbe_mac_X550)
3891                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "dmac",
3892                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3893                                 ixgbe_sysctl_dmac, "I", "DMA Coalesce");
3894
3895         /* for X550T and X550EM backplane devices */
3896         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3897             hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
3898                 struct sysctl_oid *eee_node;
3899                 struct sysctl_oid_list *eee_list;
3900
3901                 eee_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "eee",
3902                                            CTLFLAG_RD, NULL,
3903                                            "Energy Efficient Ethernet sysctls");
3904                 eee_list = SYSCTL_CHILDREN(eee_node);
3905
3906                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "enable",
3907                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3908                                 ixgbe_sysctl_eee_enable, "I",
3909                                 "Enable or Disable EEE");
3910
3911                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "negotiated",
3912                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3913                                 ixgbe_sysctl_eee_negotiated, "I",
3914                                 "EEE negotiated on link");
3915
3916                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "tx_lpi_status",
3917                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3918                                 ixgbe_sysctl_eee_tx_lpi_status, "I",
3919                                 "Whether or not TX link is in LPI state");
3920
3921                 SYSCTL_ADD_PROC(ctx, eee_list, OID_AUTO, "rx_lpi_status",
3922                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3923                                 ixgbe_sysctl_eee_rx_lpi_status, "I",
3924                                 "Whether or not RX link is in LPI state");
3925         }
3926
3927         /* for certain 10GBaseT devices */
3928         if (hw->device_id == IXGBE_DEV_ID_X550T ||
3929             hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3930                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wol_enable",
3931                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3932                                 ixgbe_sysctl_wol_enable, "I",
3933                                 "Enable/Disable Wake on LAN");
3934
3935                 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "wufc",
3936                                 CTLTYPE_INT | CTLFLAG_RW, adapter, 0,
3937                                 ixgbe_sysctl_wufc, "I",
3938                                 "Enable/Disable Wake Up Filters");
3939         }
3940
3941         /* for X550EM 10GBaseT devices */
3942         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3943                 struct sysctl_oid *phy_node;
3944                 struct sysctl_oid_list *phy_list;
3945
3946                 phy_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "phy",
3947                                            CTLFLAG_RD, NULL,
3948                                            "External PHY sysctls");
3949                 phy_list = SYSCTL_CHILDREN(phy_node);
3950
3951                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "temp",
3952                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3953                                 ixgbe_sysctl_phy_temp, "I",
3954                                 "Current External PHY Temperature (Celsius)");
3955
3956                 SYSCTL_ADD_PROC(ctx, phy_list, OID_AUTO, "overtemp_occurred",
3957                                 CTLTYPE_INT | CTLFLAG_RD, adapter, 0,
3958                                 ixgbe_sysctl_phy_overtemp_occurred, "I",
3959                                 "External PHY High Temperature Event Occurred");
3960         }
3961 }
3962
3963 /*
3964  * Add sysctl variables, one per statistic, to the system.
3965  */
3966 static void
3967 ixgbe_add_hw_stats(struct adapter *adapter)
3968 {
3969         device_t dev = adapter->dev;
3970
3971         struct tx_ring *txr = adapter->tx_rings;
3972         struct rx_ring *rxr = adapter->rx_rings;
3973
3974         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3975         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3976         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3977         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3978
3979         struct sysctl_oid *stat_node, *queue_node;
3980         struct sysctl_oid_list *stat_list, *queue_list;
3981
3982 #define QUEUE_NAME_LEN 32
3983         char namebuf[QUEUE_NAME_LEN];
3984
3985         /* Driver Statistics */
3986         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3987                         CTLFLAG_RD, &adapter->dropped_pkts,
3988                         "Driver dropped packets");
3989         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3990                         CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3991                         "m_defrag() failed");
3992         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3993                         CTLFLAG_RD, &adapter->watchdog_events,
3994                         "Watchdog timeouts");
3995         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3996                         CTLFLAG_RD, &adapter->link_irq,
3997                         "Link MSIX IRQ Handled");
3998
3999         for (int i = 0; i < adapter->num_queues; i++, txr++) {
4000                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4001                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4002                                             CTLFLAG_RD, NULL, "Queue Name");
4003                 queue_list = SYSCTL_CHILDREN(queue_node);
4004
4005                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
4006                                 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
4007                                 sizeof(&adapter->queues[i]),
4008                                 ixgbe_sysctl_interrupt_rate_handler, "IU",
4009                                 "Interrupt Rate");
4010                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
4011                                 CTLFLAG_RD, &(adapter->queues[i].irqs),
4012                                 "irqs on this queue");
4013                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
4014                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4015                                 ixgbe_sysctl_tdh_handler, "IU",
4016                                 "Transmit Descriptor Head");
4017                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
4018                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
4019                                 ixgbe_sysctl_tdt_handler, "IU",
4020                                 "Transmit Descriptor Tail");
4021                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
4022                                 CTLFLAG_RD, &txr->tso_tx,
4023                                 "TSO");
4024                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
4025                                 CTLFLAG_RD, &txr->no_tx_dma_setup,
4026                                 "Driver tx dma failure in xmit");
4027                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
4028                                 CTLFLAG_RD, &txr->no_desc_avail,
4029                                 "Queue No Descriptor Available");
4030                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
4031                                 CTLFLAG_RD, &txr->total_packets,
4032                                 "Queue Packets Transmitted");
4033                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "br_drops",
4034                                 CTLFLAG_RD, &txr->br->br_drops,
4035                                 "Packets dropped in buf_ring");
4036         }
4037
4038         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4039                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
4040                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4041                                             CTLFLAG_RD, NULL, "Queue Name");
4042                 queue_list = SYSCTL_CHILDREN(queue_node);
4043
4044                 struct lro_ctrl *lro = &rxr->lro;
4050
4051                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
4052                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4053                                 ixgbe_sysctl_rdh_handler, "IU",
4054                                 "Receive Descriptor Head");
4055                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
4056                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
4057                                 ixgbe_sysctl_rdt_handler, "IU",
4058                                 "Receive Descriptor Tail");
4059                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
4060                                 CTLFLAG_RD, &rxr->rx_packets,
4061                                 "Queue Packets Received");
4062                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
4063                                 CTLFLAG_RD, &rxr->rx_bytes,
4064                                 "Queue Bytes Received");
4065                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
4066                                 CTLFLAG_RD, &rxr->rx_copies,
4067                                 "Copied RX Frames");
4068                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
4069                                 CTLFLAG_RD, &lro->lro_queued, 0,
4070                                 "LRO Queued");
4071                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
4072                                 CTLFLAG_RD, &lro->lro_flushed, 0,
4073                                 "LRO Flushed");
4074         }
4075
4076         /* MAC stats get their own sub node */
4077
4078         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
4079                                     CTLFLAG_RD, NULL, "MAC Statistics");
4080         stat_list = SYSCTL_CHILDREN(stat_node);
4081
4082         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
4083                         CTLFLAG_RD, &stats->crcerrs,
4084                         "CRC Errors");
4085         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
4086                         CTLFLAG_RD, &stats->illerrc,
4087                         "Illegal Byte Errors");
4088         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
4089                         CTLFLAG_RD, &stats->errbc,
4090                         "Byte Errors");
4091         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
4092                         CTLFLAG_RD, &stats->mspdc,
4093                         "MAC Short Packets Discarded");
4094         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
4095                         CTLFLAG_RD, &stats->mlfc,
4096                         "MAC Local Faults");
4097         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
4098                         CTLFLAG_RD, &stats->mrfc,
4099                         "MAC Remote Faults");
4100         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
4101                         CTLFLAG_RD, &stats->rlec,
4102                         "Receive Length Errors");
4103
4104         /* Flow Control stats */
4105         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
4106                         CTLFLAG_RD, &stats->lxontxc,
4107                         "Link XON Transmitted");
4108         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
4109                         CTLFLAG_RD, &stats->lxonrxc,
4110                         "Link XON Received");
4111         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
4112                         CTLFLAG_RD, &stats->lxofftxc,
4113                         "Link XOFF Transmitted");
4114         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
4115                         CTLFLAG_RD, &stats->lxoffrxc,
4116                         "Link XOFF Received");
4117
4118         /* Packet Reception Stats */
4119         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
4120                         CTLFLAG_RD, &stats->tor, 
4121                         "Total Octets Received"); 
4122         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
4123                         CTLFLAG_RD, &stats->gorc, 
4124                         "Good Octets Received"); 
4125         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
4126                         CTLFLAG_RD, &stats->tpr,
4127                         "Total Packets Received");
4128         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
4129                         CTLFLAG_RD, &stats->gprc,
4130                         "Good Packets Received");
4131         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
4132                         CTLFLAG_RD, &stats->mprc,
4133                         "Multicast Packets Received");
4134         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
4135                         CTLFLAG_RD, &stats->bprc,
4136                         "Broadcast Packets Received");
4137         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
4138                         CTLFLAG_RD, &stats->prc64,
4139                         "64 byte frames received");
4140         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
4141                         CTLFLAG_RD, &stats->prc127,
4142                         "65-127 byte frames received");
4143         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
4144                         CTLFLAG_RD, &stats->prc255,
4145                         "128-255 byte frames received");
4146         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
4147                         CTLFLAG_RD, &stats->prc511,
4148                         "256-511 byte frames received");
4149         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
4150                         CTLFLAG_RD, &stats->prc1023,
4151                         "512-1023 byte frames received");
4152         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
4153                         CTLFLAG_RD, &stats->prc1522,
4154                         "1024-1522 byte frames received");
4155         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
4156                         CTLFLAG_RD, &stats->ruc,
4157                         "Receive Undersized");
4158         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
4159                         CTLFLAG_RD, &stats->rfc,
4160                         "Fragmented Packets Received");
4161         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
4162                         CTLFLAG_RD, &stats->roc,
4163                         "Oversized Packets Received");
4164         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
4165                         CTLFLAG_RD, &stats->rjc,
4166                         "Received Jabber");
4167         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
4168                         CTLFLAG_RD, &stats->mngprc,
4169                         "Management Packets Received");
4170         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
4171                         CTLFLAG_RD, &stats->mngpdc,
4172                         "Management Packets Dropped");
4173         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
4174                         CTLFLAG_RD, &stats->xec,
4175                         "Checksum Errors");
4176
4177         /* Packet Transmission Stats */
4178         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
4179                         CTLFLAG_RD, &stats->gotc, 
4180                         "Good Octets Transmitted"); 
4181         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
4182                         CTLFLAG_RD, &stats->tpt,
4183                         "Total Packets Transmitted");
4184         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
4185                         CTLFLAG_RD, &stats->gptc,
4186                         "Good Packets Transmitted");
4187         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
4188                         CTLFLAG_RD, &stats->bptc,
4189                         "Broadcast Packets Transmitted");
4190         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
4191                         CTLFLAG_RD, &stats->mptc,
4192                         "Multicast Packets Transmitted");
4193         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
4194                         CTLFLAG_RD, &stats->mngptc,
4195                         "Management Packets Transmitted");
4196         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
4197                         CTLFLAG_RD, &stats->ptc64,
4198                         "64 byte frames transmitted");
4199         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
4200                         CTLFLAG_RD, &stats->ptc127,
4201                         "65-127 byte frames transmitted");
4202         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
4203                         CTLFLAG_RD, &stats->ptc255,
4204                         "128-255 byte frames transmitted");
4205         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
4206                         CTLFLAG_RD, &stats->ptc511,
4207                         "256-511 byte frames transmitted");
4208         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
4209                         CTLFLAG_RD, &stats->ptc1023,
4210                         "512-1023 byte frames transmitted");
4211         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
4212                         CTLFLAG_RD, &stats->ptc1522,
4213                         "1024-1522 byte frames transmitted");
4214 }
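     /*
     ** Example (illustrative only, not compiled into the driver): the
     ** read-only statistics registered above can be fetched from userland
     ** with sysctl(3).  The node path below is an assumption, based on the
     ** usual "dev.ix.<unit>" prefix and the "mac_stats" sub-node created
     ** above, with unit 0.
     **
     **      #include <sys/types.h>
     **      #include <sys/sysctl.h>
     **      #include <stdint.h>
     **      #include <stdio.h>
     **
     **      int
     **      main(void)
     **      {
     **              uint64_t crcerrs;
     **              size_t len = sizeof(crcerrs);
     **
     **              if (sysctlbyname("dev.ix.0.mac_stats.crc_errs",
     **                  &crcerrs, &len, NULL, 0) == 0)
     **                      printf("CRC errors: %ju\n", (uintmax_t)crcerrs);
     **              return (0);
     **      }
     */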
4215
4216 /*
4217 ** Set flow control using sysctl:
4218 ** Flow control values:
4219 **      0 - off
4220 **      1 - rx pause
4221 **      2 - tx pause
4222 **      3 - full
4223 */
4224 static int
4225 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
4226 {
4227         int error, last;
4228         struct adapter *adapter = (struct adapter *) arg1;
4229
4230         last = adapter->fc;
4231         error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
4232         if ((error) || (req->newptr == NULL))
4233                 return (error);
4234
4235         /* Don't bother if it's not changed */
4236         if (adapter->fc == last)
4237                 return (0);
4238
4239         switch (adapter->fc) {
4240                 case ixgbe_fc_rx_pause:
4241                 case ixgbe_fc_tx_pause:
4242                 case ixgbe_fc_full:
4243                         adapter->hw.fc.requested_mode = adapter->fc;
4244                         if (adapter->num_queues > 1)
4245                                 ixgbe_disable_rx_drop(adapter);
4246                         break;
4247                 case ixgbe_fc_none:
4248                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
4249                         if (adapter->num_queues > 1)
4250                                 ixgbe_enable_rx_drop(adapter);
4251                         break;
4252                 default:
4253                         adapter->fc = last;
4254                         return (EINVAL);
4255         }
4256         /* Don't autoneg if forcing a value */
4257         adapter->hw.fc.disable_fc_autoneg = TRUE;
4258         ixgbe_fc_enable(&adapter->hw);
4259         return (error);
4260 }
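     /*
     ** Illustrative usage, assuming the handler above is attached to a node
     ** named "fc" under dev.ix.<unit> (the registration is not shown in this
     ** section).  Requesting full flow control from userland:
     **
     **      # sysctl dev.ix.0.fc=3
     **
     ** or, from C, with sysctl(3):
     **
     **      int fc = 3;                              (3 == ixgbe_fc_full)
     **      sysctlbyname("dev.ix.0.fc", NULL, NULL, &fc, sizeof(fc));
     */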
4261
4262 /*
4263 ** Control advertised link speed:
4264 **      Flags:
4265 **      0x1 - advertise 100 Mb
4266 **      0x2 - advertise 1G
4267 **      0x4 - advertise 10G
4268 */
4269 static int
4270 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4271 {
4272         int                     error = 0, requested;
4273         struct adapter          *adapter;
4274         device_t                dev;
4275         struct ixgbe_hw         *hw;
4276         ixgbe_link_speed        speed = 0;
4277
4278         adapter = (struct adapter *) arg1;
4279         dev = adapter->dev;
4280         hw = &adapter->hw;
4281
4282         requested = adapter->advertise;
4283         error = sysctl_handle_int(oidp, &requested, 0, req);
4284         if ((error) || (req->newptr == NULL))
4285                 return (error);
4286
4287         /* Checks to validate new value */
4288         if (adapter->advertise == requested) /* no change */
4289                 return (0);
4290
4291         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4292             (hw->phy.multispeed_fiber))) {
4293                 device_printf(dev,
4294                     "Advertised speed can only be set on copper or "
4295                     "multispeed fiber media types.\n");
4296                 return (EINVAL);
4297         }
4298
4299         if (requested < 0x1 || requested > 0x7) {
4300                 device_printf(dev,
4301                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4302                 return (EINVAL);
4303         }
4304
4305         if ((requested & 0x1)
4306             && (hw->mac.type != ixgbe_mac_X540)
4307             && (hw->mac.type != ixgbe_mac_X550)) {
4308                 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4309                 return (EINVAL);
4310         }
4311
4312         /* Set new value and report new advertised mode */
4313         if (requested & 0x1)
4314                 speed |= IXGBE_LINK_SPEED_100_FULL;
4315         if (requested & 0x2)
4316                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4317         if (requested & 0x4)
4318                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4319
4320         hw->mac.autotry_restart = TRUE;
4321         hw->mac.ops.setup_link(hw, speed, TRUE);
4322         adapter->advertise = requested;
4323
4324         return (error);
4325 }
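     /*
     ** Illustrative usage (the node name "advertise_speed" is an assumption,
     ** as the registration is not shown here).  The flags above form a
     ** bitmask, so advertising both 1G and 10G means writing 0x2 | 0x4 = 0x6:
     **
     **      # sysctl dev.ix.0.advertise_speed=6
     */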
4326
4327 /*
4328  * The following two sysctls are for X550 BaseT devices;
4329  * they deal with the external PHY used in them.
4330  */
4331 static int
4332 ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
4333 {
4334         struct adapter  *adapter = (struct adapter *) arg1;
4335         struct ixgbe_hw *hw = &adapter->hw;
4336         u16 reg;
4337
4338         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4339                 device_printf(adapter->dev,
4340                     "Device has no supported external thermal sensor.\n");
4341                 return (ENODEV);
4342         }
4343
4344         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
4345                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4346                                       &reg)) {
4347                 device_printf(adapter->dev,
4348                     "Error reading from PHY's current temperature register\n");
4349                 return (EAGAIN);
4350         }
4351
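             /*
              * Decoding example (assumes the PHY reports the temperature in
              * degrees Celsius in the upper byte of this register, which is
              * what the shift below relies on): a raw value of 0x2d00 would
              * be returned through the sysctl as 0x2d = 45.
              */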
4352         /* Shift temp for output */
4353         reg = reg >> 8;
4354
4355         return (sysctl_handle_int(oidp, NULL, reg, req));
4356 }
4357
4358 /*
4359  * Reports whether the current PHY temperature is over
4360  * the overtemp threshold.
4361  *  - This is reported directly from the PHY
4362  */
4363 static int
4364 ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
4365 {
4366         struct adapter  *adapter = (struct adapter *) arg1;
4367         struct ixgbe_hw *hw = &adapter->hw;
4368         u16 reg;
4369
4370         if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
4371                 device_printf(adapter->dev,
4372                     "Device has no supported external thermal sensor.\n");
4373                 return (ENODEV);
4374         }
4375
4376         if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
4377                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
4378                                       &reg)) {
4379                 device_printf(adapter->dev,
4380                     "Error reading from PHY's temperature status register\n");
4381                 return (EAGAIN);
4382         }
4383
4384         /* Get occurrence bit */
4385         reg = !!(reg & 0x4000);
4386         return (sysctl_handle_int(oidp, 0, reg, req));
4387 }
4388
4389 /*
4390 ** Thermal Shutdown Trigger (internal MAC)
4391 **   - Set this to 1 to cause an overtemp event to occur
4392 */
4393 static int
4394 ixgbe_sysctl_thermal_test(SYSCTL_HANDLER_ARGS)
4395 {
4396         struct adapter  *adapter = (struct adapter *) arg1;
4397         struct ixgbe_hw *hw = &adapter->hw;
4398         int error, fire = 0;
4399
4400         error = sysctl_handle_int(oidp, &fire, 0, req);
4401         if ((error) || (req->newptr == NULL))
4402                 return (error);
4403
4404         if (fire) {
4405                 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4406                 reg |= IXGBE_EICR_TS;
4407                 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4408         }
4409
4410         return (0);
4411 }
4412
4413 /*
4414 ** Manage DMA Coalescing.
4415 ** Control values:
4416 **      0 - off, 1 - on (uses the default timer value of 1000)
4417 **
4418 **      Legal timer values are:
4419 **      50,100,250,500,1000,2000,5000,10000
4420 **
4421 **      Turning off interrupt moderation will also turn this off.
4422 */
4423 static int
4424 ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
4425 {
4426         struct adapter *adapter = (struct adapter *) arg1;
4427         struct ixgbe_hw *hw = &adapter->hw;
4428         struct ifnet *ifp = adapter->ifp;
4429         int             error;
4430         u16             oldval;
4431
4432         oldval = adapter->dmac;
4433         error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
4434         if ((error) || (req->newptr == NULL))
4435                 return (error);
4436
4437         switch (hw->mac.type) {
4438         case ixgbe_mac_X550:
4439         case ixgbe_mac_X550EM_x:
4440                 break;
4441         default:
4442                 device_printf(adapter->dev,
4443                     "DMA Coalescing is only supported on X550 devices\n");
4444                 return (ENODEV);
4445         }
4446
4447         switch (adapter->dmac) {
4448         case 0:
4449                 /* Disabled */
4450                 break;
4451         case 1: /* Enable and use default */
4452                 adapter->dmac = 1000;
4453                 break;
4454         case 50:
4455         case 100:
4456         case 250:
4457         case 500:
4458         case 1000:
4459         case 2000:
4460         case 5000:
4461         case 10000:
4462                 /* Legal values - allow */
4463                 break;
4464         default:
4465                 /* Illegal value, restore the previous setting */
4466                 adapter->dmac = oldval;
4467                 return (EINVAL);
4468         }
4469
4470         /* Re-initialize hardware if it's already running */
4471         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4472                 ixgbe_init(adapter);
4473
4474         return (0);
4475 }
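     /*
     ** Illustrative usage (the node name "dmac" is an assumption).  Writing 1
     ** enables coalescing with the default value of 1000 chosen above, while
     ** any other legal value sets the timer directly, e.g.:
     **
     **      # sysctl dev.ix.0.dmac=250
     */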
4476
4477 /*
4478  * Sysctl to enable/disable the WoL capability, if supported by the adapter.
4479  * Values:
4480  *      0 - disabled
4481  *      1 - enabled
4482  */
4483 static int
4484 ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
4485 {
4486         struct adapter *adapter = (struct adapter *) arg1;
4487         struct ixgbe_hw *hw = &adapter->hw;
4488         int new_wol_enabled;
4489         int error = 0;
4490
4491         new_wol_enabled = hw->wol_enabled;
4492         error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
4493         if ((error) || (req->newptr == NULL))
4494                 return (error);
4495         if (new_wol_enabled == hw->wol_enabled)
4496                 return (0);
4497
4498         if (new_wol_enabled > 0 && !adapter->wol_support)
4499                 return (ENODEV);
4500         else
4501                 hw->wol_enabled = !!(new_wol_enabled);
4502
4503         return (0);
4504 }
4505
4506 /*
4507  * Sysctl to enable/disable the Energy Efficient Ethernet capability,
4508  * if supported by the adapter.
4509  * Values:
4510  *      0 - disabled
4511  *      1 - enabled
4512  */
4513 static int
4514 ixgbe_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4515 {
4516         struct adapter *adapter = (struct adapter *) arg1;
4517         struct ifnet *ifp = adapter->ifp;
4518         int new_eee_enabled, error = 0;
4519
4520         new_eee_enabled = adapter->eee_enabled;
4521         error = sysctl_handle_int(oidp, &new_eee_enabled, 0, req);
4522         if ((error) || (req->newptr == NULL))
4523                 return (error);
4524         if (new_eee_enabled == adapter->eee_enabled)
4525                 return (0);
4526
4527         if (new_eee_enabled > 0 && !adapter->eee_support)
4528                 return (ENODEV);
4529         else
4530                 adapter->eee_enabled = !!(new_eee_enabled);
4531
4532         /* Re-initialize hardware if it's already running */
4533         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
4534                 ixgbe_init(adapter);
4535
4536         return (0);
4537 }
4538
4539 /*
4540  * Read-only sysctl indicating whether EEE support was negotiated
4541  * on the link.
4542  */
4543 static int
4544 ixgbe_sysctl_eee_negotiated(SYSCTL_HANDLER_ARGS)
4545 {
4546         struct adapter *adapter = (struct adapter *) arg1;
4547         struct ixgbe_hw *hw = &adapter->hw;
4548         bool status;
4549
4550         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) & IXGBE_EEE_STAT_NEG);
4551
4552         return (sysctl_handle_int(oidp, 0, status, req));
4553 }
4554
4555 /*
4556  * Read-only sysctl indicating whether RX Link is in LPI state.
4557  */
4558 static int
4559 ixgbe_sysctl_eee_rx_lpi_status(SYSCTL_HANDLER_ARGS)
4560 {
4561         struct adapter *adapter = (struct adapter *) arg1;
4562         struct ixgbe_hw *hw = &adapter->hw;
4563         bool status;
4564
4565         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4566             IXGBE_EEE_RX_LPI_STATUS);
4567
4568         return (sysctl_handle_int(oidp, 0, status, req));
4569 }
4570
4571 /*
4572  * Read-only sysctl indicating whether TX Link is in LPI state.
4573  */
4574 static int
4575 ixgbe_sysctl_eee_tx_lpi_status(SYSCTL_HANDLER_ARGS)
4576 {
4577         struct adapter *adapter = (struct adapter *) arg1;
4578         struct ixgbe_hw *hw = &adapter->hw;
4579         bool status;
4580
4581         status = !!(IXGBE_READ_REG(hw, IXGBE_EEE_STAT) &
4582             IXGBE_EEE_TX_LPI_STATUS);
4583
4584         return (sysctl_handle_int(oidp, 0, status, req));
4585 }
4586
4587 /*
4588  * Sysctl to enable/disable the types of packets that the
4589  * adapter will wake up on upon receipt.
4590  * WUFC - Wake Up Filter Control
4591  * Flags:
4592  *      0x1  - Link Status Change
4593  *      0x2  - Magic Packet
4594  *      0x4  - Direct Exact
4595  *      0x8  - Directed Multicast
4596  *      0x10 - Broadcast
4597  *      0x20 - ARP/IPv4 Request Packet
4598  *      0x40 - Direct IPv4 Packet
4599  *      0x80 - Direct IPv6 Packet
4600  *
4601  * Setting any flag outside this set will cause the sysctl to
4602  * return an error.
4603  */
4604 static int
4605 ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
4606 {
4607         struct adapter *adapter = (struct adapter *) arg1;
4608         int error = 0;
4609         u32 new_wufc;
4610
4611         new_wufc = adapter->wufc;
4612
4613         error = sysctl_handle_int(oidp, &new_wufc, 0, req);
4614         if ((error) || (req->newptr == NULL))
4615                 return (error);
4616         if (new_wufc == adapter->wufc)
4617                 return (0);
4618
4619         if (new_wufc & 0xffffff00)
4620                 return (EINVAL);
4621         else {
4622                 new_wufc &= 0xff;
4623                 new_wufc |= (0xffffff00 & adapter->wufc);
4624                 adapter->wufc = new_wufc;
4625         }
4626
4627         return (0);
4628 }
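     /*
     ** Illustrative usage (the node name "wufc" is an assumption).  The flags
     ** combine as a bitmask, so waking on both Link Status Change and Magic
     ** Packet means writing 0x1 | 0x2 = 0x3:
     **
     **      # sysctl dev.ix.0.wufc=3
     */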
4629
4630 /*
4631 ** Enable the hardware to drop packets when the buffer is
4632 ** full. This is useful with multiple queues, so that no single
4633 ** full queue stalls the entire RX engine. We only enable this
4634 ** when multiqueue is in use AND when Flow Control is
4635 ** disabled.
4636 */
4637 static void
4638 ixgbe_enable_rx_drop(struct adapter *adapter)
4639 {
4640         struct ixgbe_hw *hw = &adapter->hw;
4641
4642         for (int i = 0; i < adapter->num_queues; i++) {
4643                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4644                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4645                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4646         }
4647 }
4648
4649 static void
4650 ixgbe_disable_rx_drop(struct adapter *adapter)
4651 {
4652         struct ixgbe_hw *hw = &adapter->hw;
4653
4654         for (int i = 0; i < adapter->num_queues; i++) {
4655                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4656                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4657                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4658         }
4659 }
4660
4661 static void
4662 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4663 {
4664         u32 mask;
4665
4666         switch (adapter->hw.mac.type) {
4667         case ixgbe_mac_82598EB:
4668                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4669                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4670                 break;
4671         case ixgbe_mac_82599EB:
4672         case ixgbe_mac_X540:
4673         case ixgbe_mac_X550:
4674         case ixgbe_mac_X550EM_x:
4675                 mask = (queues & 0xFFFFFFFF);
4676                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4677                 mask = (queues >> 32);
4678                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4679                 break;
4680         default:
4681                 break;
4682         }
4683 }
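     /*
     ** Illustrative call (hypothetical caller, not taken from this file):
     ** to rearm only the interrupt for queue index 3, pass a one-bit mask:
     **
     **      ixgbe_rearm_queues(adapter, (u64)1 << 3);
     */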
4684
4685