1 /******************************************************************************
2
3   Copyright (c) 2001-2015, Intel Corporation 
4   All rights reserved.
5   
6   Redistribution and use in source and binary forms, with or without 
7   modification, are permitted provided that the following conditions are met:
8   
9    1. Redistributions of source code must retain the above copyright notice, 
10       this list of conditions and the following disclaimer.
11   
12    2. Redistributions in binary form must reproduce the above copyright 
13       notice, this list of conditions and the following disclaimer in the 
14       documentation and/or other materials provided with the distribution.
15   
16    3. Neither the name of the Intel Corporation nor the names of its 
17       contributors may be used to endorse or promote products derived from 
18       this software without specific prior written permission.
19   
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #ifndef IXGBE_STANDALONE_BUILD
37 #include "opt_inet.h"
38 #include "opt_inet6.h"
39 #include "opt_rss.h"
40 #endif
41
42 #include "ixgbe.h"
43
44 #ifdef  RSS
45 #include <net/rss_config.h>
46 #include <netinet/in_rss.h>
47 #endif
48
49 /*********************************************************************
50  *  Set this to one to display debug statistics
51  *********************************************************************/
52 int             ixgbe_display_debug_stats = 0;
53
54 /*********************************************************************
55  *  Driver version
56  *********************************************************************/
57 char ixgbe_driver_version[] = "2.7.4";
58
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select devices to load on
63  *  Last field stores an index into ixgbe_strings
64  *  Last entry must be all 0s
65  *
66  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
67  *********************************************************************/
68
69 static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
70 {
71         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
72         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
73         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
74         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
75         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
76         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
77         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
78         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
79         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
80         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
81         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
82         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
83         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
84         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
85         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
86         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
87         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
88         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
89         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
90         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
91         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
92         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
93         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
94         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
95         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
96         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
97         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
98         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
99         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
100         {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
101         /* required last entry */
102         {0, 0, 0, 0, 0}
103 };
104
105 /*********************************************************************
106  *  Table of branding strings
107  *********************************************************************/
108
109 static char    *ixgbe_strings[] = {
110         "Intel(R) PRO/10GbE PCI-Express Network Driver"
111 };
112
113 /*********************************************************************
114  *  Function prototypes
115  *********************************************************************/
116 static int      ixgbe_probe(device_t);
117 static int      ixgbe_attach(device_t);
118 static int      ixgbe_detach(device_t);
119 static int      ixgbe_shutdown(device_t);
120 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
121 static void     ixgbe_init(void *);
122 static void     ixgbe_init_locked(struct adapter *);
123 static void     ixgbe_stop(void *);
124 #if __FreeBSD_version >= 1100036
125 static uint64_t ixgbe_get_counter(struct ifnet *, ift_counter);
126 #endif
127 static void     ixgbe_add_media_types(struct adapter *);
128 static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
129 static int      ixgbe_media_change(struct ifnet *);
130 static void     ixgbe_identify_hardware(struct adapter *);
131 static int      ixgbe_allocate_pci_resources(struct adapter *);
132 static void     ixgbe_get_slot_info(struct ixgbe_hw *);
133 static int      ixgbe_allocate_msix(struct adapter *);
134 static int      ixgbe_allocate_legacy(struct adapter *);
135 static int      ixgbe_setup_msix(struct adapter *);
136 static void     ixgbe_free_pci_resources(struct adapter *);
137 static void     ixgbe_local_timer(void *);
138 static int      ixgbe_setup_interface(device_t, struct adapter *);
139 static void     ixgbe_config_link(struct adapter *);
140 static void     ixgbe_rearm_queues(struct adapter *, u64);
141
142 static void     ixgbe_initialize_transmit_units(struct adapter *);
143 static void     ixgbe_initialize_receive_units(struct adapter *);
144 static void     ixgbe_enable_rx_drop(struct adapter *);
145 static void     ixgbe_disable_rx_drop(struct adapter *);
146
147 static void     ixgbe_enable_intr(struct adapter *);
148 static void     ixgbe_disable_intr(struct adapter *);
149 static void     ixgbe_update_stats_counters(struct adapter *);
150 static void     ixgbe_set_promisc(struct adapter *);
151 static void     ixgbe_set_multi(struct adapter *);
152 static void     ixgbe_update_link_status(struct adapter *);
153 static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
154 static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
155 static int      ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
156 static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
157 static void     ixgbe_configure_ivars(struct adapter *);
158 static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
159
160 static void     ixgbe_setup_vlan_hw_support(struct adapter *);
161 static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
162 static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
163
164 static void     ixgbe_add_hw_stats(struct adapter *adapter);
165
166 /* Support for pluggable optic modules */
167 static bool     ixgbe_sfp_probe(struct adapter *);
168 static void     ixgbe_setup_optics(struct adapter *);
169
170 /* Legacy (single vector) interrupt handler */
171 static void     ixgbe_legacy_irq(void *);
172
173 /* The MSI/X Interrupt handlers */
174 static void     ixgbe_msix_que(void *);
175 static void     ixgbe_msix_link(void *);
176
177 /* Deferred interrupt tasklets */
178 static void     ixgbe_handle_que(void *, int);
179 static void     ixgbe_handle_link(void *, int);
180 static void     ixgbe_handle_msf(void *, int);
181 static void     ixgbe_handle_mod(void *, int);
182
183 #ifdef IXGBE_FDIR
184 static void     ixgbe_reinit_fdir(void *, int);
185 #endif
186
187
188 /* Missing shared code prototype */
189 extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
190
191 /*********************************************************************
192  *  FreeBSD Device Interface Entry Points
193  *********************************************************************/
194
195 static device_method_t ix_methods[] = {
196         /* Device interface */
197         DEVMETHOD(device_probe, ixgbe_probe),
198         DEVMETHOD(device_attach, ixgbe_attach),
199         DEVMETHOD(device_detach, ixgbe_detach),
200         DEVMETHOD(device_shutdown, ixgbe_shutdown),
201         DEVMETHOD_END
202 };
203
204 static driver_t ix_driver = {
205         "ix", ix_methods, sizeof(struct adapter),
206 };
207
208 devclass_t ix_devclass;
209 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
210
211 MODULE_DEPEND(ix, pci, 1, 1, 1);
212 MODULE_DEPEND(ix, ether, 1, 1, 1);
213
214 /*
215 ** TUNEABLE PARAMETERS:
216 */
217
218 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD, 0,
219                    "IXGBE driver parameters");
220
221 /*
222 ** AIM: Adaptive Interrupt Moderation,
223 ** which varies the interrupt rate over
224 ** time based on the traffic seen on
225 ** that interrupt vector.
226 */
227 static int ixgbe_enable_aim = TRUE;
228 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
229     "Enable adaptive interrupt moderation");
230
231 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
232 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
233     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
234
235 /* How many packets rxeof tries to clean at a time */
236 static int ixgbe_rx_process_limit = 256;
237 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
238 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
239     &ixgbe_rx_process_limit, 0,
240     "Maximum number of received packets to process at a time,"
241     "-1 means unlimited");
242
243 /* How many packets txeof tries to clean at a time */
244 static int ixgbe_tx_process_limit = 256;
245 TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
246 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
247     &ixgbe_tx_process_limit, 0,
248     "Maximum number of sent packets to process at a time,"
249     "-1 means unlimited");
250
251 /*
252 ** Smart speed setting, default to on.
253 ** This only works as a compile-time
254 ** option right now since it is set
255 ** during attach; set it to
256 ** 'ixgbe_smart_speed_off' to disable.
257 */
258 static int ixgbe_smart_speed = ixgbe_smart_speed_on;
259
260 /*
261  * MSIX should be the default for best performance,
262  * but this allows it to be forced off for testing.
263  */
264 static int ixgbe_enable_msix = 1;
265 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
266     "Enable MSI-X interrupts");
267
268 /*
269  * Number of queues; if set to 0 it is
270  * autoconfigured based on the number
271  * of CPUs, with a maximum of 8. It can
272  * be overridden manually here.
273  */
274 static int ixgbe_num_queues = 0;
275 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
276     "Number of queues to configure, 0 indicates autoconfigure");
277
278 /*
279 ** Number of TX descriptors per ring;
280 ** set higher than RX as this seems to
281 ** be the better-performing choice.
282 */
283 static int ixgbe_txd = PERFORM_TXD;
284 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
285     "Number of transmit descriptors per queue");
286
287 /* Number of RX descriptors per ring */
288 static int ixgbe_rxd = PERFORM_RXD;
289 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
290     "Number of receive descriptors per queue");
291
292 /*
293 ** Setting this to TRUE allows the use
294 ** of unsupported SFP+ modules; note
295 ** that doing so means you are on your own :)
296 */
297 static int allow_unsupported_sfp = FALSE;
298 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
299
300 /* Keep a running tab on the ports for sanity checking */
301 static int ixgbe_total_ports;
302
303 #ifdef IXGBE_FDIR
304 /*
305 ** Flow Director actually 'steals'
306 ** part of the packet buffer as its
307 ** filter pool; this variable controls
308 ** how much it uses:
309 **  0 = 64K, 1 = 128K, 2 = 256K
310 */
311 static int fdir_pballoc = 1;
312 #endif
313
314 #ifdef DEV_NETMAP
315 /*
316  * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
317  * be a reference on how to implement netmap support in a driver.
318  * Additional comments are in ixgbe_netmap.h .
319  *
320  * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
321  * that extend the standard driver.
322  */
323 #include <dev/netmap/ixgbe_netmap.h>
324 #endif /* DEV_NETMAP */
325
326 /*********************************************************************
327  *  Device identification routine
328  *
329  *  ixgbe_probe determines if the driver should be loaded on an
330  *  adapter based on the PCI vendor/device ID of the adapter.
331  *
332  *  return BUS_PROBE_DEFAULT on success, positive on failure
333  *********************************************************************/
334
335 static int
336 ixgbe_probe(device_t dev)
337 {
338         ixgbe_vendor_info_t *ent;
339
340         u16     pci_vendor_id = 0;
341         u16     pci_device_id = 0;
342         u16     pci_subvendor_id = 0;
343         u16     pci_subdevice_id = 0;
344         char    adapter_name[256];
345
346         INIT_DEBUGOUT("ixgbe_probe: begin");
347
348         pci_vendor_id = pci_get_vendor(dev);
349         if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
350                 return (ENXIO);
351
352         pci_device_id = pci_get_device(dev);
353         pci_subvendor_id = pci_get_subvendor(dev);
354         pci_subdevice_id = pci_get_subdevice(dev);
355
356         ent = ixgbe_vendor_info_array;
357         while (ent->vendor_id != 0) {
358                 if ((pci_vendor_id == ent->vendor_id) &&
359                     (pci_device_id == ent->device_id) &&
360
361                     ((pci_subvendor_id == ent->subvendor_id) ||
362                      (ent->subvendor_id == 0)) &&
363
364                     ((pci_subdevice_id == ent->subdevice_id) ||
365                      (ent->subdevice_id == 0))) {
366                         sprintf(adapter_name, "%s, Version - %s",
367                                 ixgbe_strings[ent->index],
368                                 ixgbe_driver_version);
369                         device_set_desc_copy(dev, adapter_name);
370                         ++ixgbe_total_ports;
371                         return (BUS_PROBE_DEFAULT);
372                 }
373                 ent++;
374         }
375         return (ENXIO);
376 }
377
378 /*********************************************************************
379  *  Device initialization routine
380  *
381  *  The attach entry point is called when the driver is being loaded.
382  *  This routine identifies the type of hardware, allocates all resources
383  *  and initializes the hardware.
384  *
385  *  return 0 on success, positive on failure
386  *********************************************************************/
387
388 static int
389 ixgbe_attach(device_t dev)
390 {
391         struct adapter *adapter;
392         struct ixgbe_hw *hw;
393         int             error = 0;
394         u16             csum;
395         u32             ctrl_ext;
396
397         INIT_DEBUGOUT("ixgbe_attach: begin");
398
399         /* Allocate, clear, and link in our adapter structure */
400         adapter = device_get_softc(dev);
401         adapter->dev = adapter->osdep.dev = dev;
402         hw = &adapter->hw;
403
404         /* Core Lock Init*/
405         IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
406
407         /* SYSCTL APIs */
408         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
409                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
410                         OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
411                         adapter, 0, ixgbe_set_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC);
412
413         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
414                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
415                         OID_AUTO, "enable_aim", CTLFLAG_RW,
416                         &ixgbe_enable_aim, 1, "Interrupt Moderation");
417
418         /*
419         ** Allow a kind of speed control by forcing the autoneg
420         ** advertised speed list down to a certain value; this
421         ** supports 1G on 82599 devices and 100Mb on X540.
422         */
423         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
424                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
425                         OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
426                         adapter, 0, ixgbe_set_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED);
427
428         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
429                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
430                         OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter,
431                         0, ixgbe_set_thermal_test, "I", "Thermal Test");
432
433         /* Set up the timer callout */
434         callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
435
436         /* Determine hardware revision */
437         ixgbe_identify_hardware(adapter);
438
439         /* Do base PCI setup - map BAR0 */
440         if (ixgbe_allocate_pci_resources(adapter)) {
441                 device_printf(dev, "Allocation of PCI resources failed\n");
442                 error = ENXIO;
443                 goto err_out;
444         }
445
446         /* Do descriptor calc and sanity checks */
447         if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
448             ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
449                 device_printf(dev, "TXD config issue, using default!\n");
450                 adapter->num_tx_desc = DEFAULT_TXD;
451         } else
452                 adapter->num_tx_desc = ixgbe_txd;
453
454         /*
455         ** With many RX rings it is easy to exceed the
456         ** system mbuf allocation. Tuning nmbclusters
457         ** can alleviate this.
458         */
459         if (nmbclusters > 0) {
460                 int s;
461                 s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
462                 if (s > nmbclusters) {
463                         device_printf(dev, "RX Descriptors exceed "
464                             "system mbuf max, using default instead!\n");
465                         ixgbe_rxd = DEFAULT_RXD;
466                 }
467         }
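
        /*
        ** Illustration of the check above (numbers assumed): with
        ** ixgbe_rxd = 2048, 8 queues and 2 total ports the driver
        ** would want 2048 * 8 * 2 = 32768 clusters, so a smaller
        ** nmbclusters drops ixgbe_rxd back to DEFAULT_RXD.
        */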
468
469         if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
470             ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
471                 device_printf(dev, "RXD config issue, using default!\n");
472                 adapter->num_rx_desc = DEFAULT_RXD;
473         } else
474                 adapter->num_rx_desc = ixgbe_rxd;
475
476         /* Allocate our TX/RX Queues */
477         if (ixgbe_allocate_queues(adapter)) {
478                 error = ENOMEM;
479                 goto err_out;
480         }
481
482         /* Allocate multicast array memory. */
483         adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
484             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
485         if (adapter->mta == NULL) {
486                 device_printf(dev, "Can not allocate multicast setup array\n");
487                 error = ENOMEM;
488                 goto err_late;
489         }
490
491         /* Initialize the shared code */
492         hw->allow_unsupported_sfp = allow_unsupported_sfp;
493         error = ixgbe_init_shared_code(hw);
494         if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
495                 /*
496                 ** No optics in this port, set up
497                 ** so the timer routine will probe 
498                 ** for later insertion.
499                 */
500                 adapter->sfp_probe = TRUE;
501                 error = 0;
502         } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
503                 device_printf(dev,"Unsupported SFP+ module detected!\n");
504                 error = EIO;
505                 goto err_late;
506         } else if (error) {
507                 device_printf(dev,"Unable to initialize the shared code\n");
508                 error = EIO;
509                 goto err_late;
510         }
511
512         /* Make sure we have a good EEPROM before we read from it */
513         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
514                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
515                 error = EIO;
516                 goto err_late;
517         }
518
519         error = ixgbe_init_hw(hw);
520         switch (error) {
521         case IXGBE_ERR_EEPROM_VERSION:
522                 device_printf(dev, "This device is a pre-production adapter/"
523                     "LOM.  Please be aware there may be issues associated "
524                     "with your hardware.\n If you are experiencing problems "
525                     "please contact your Intel or hardware representative "
526                     "who provided you with this hardware.\n");
527                 break;
528         case IXGBE_ERR_SFP_NOT_SUPPORTED:
529                 device_printf(dev,"Unsupported SFP+ Module\n");
530                 error = EIO;
531                 goto err_late;
532         case IXGBE_ERR_SFP_NOT_PRESENT:
533                 device_printf(dev,"No SFP+ Module found\n");
534                 /* falls thru */
535         default:
536                 break;
537         }
538
539         /* Detect and set physical type */
540         ixgbe_setup_optics(adapter);
541
542         if ((adapter->msix > 1) && (ixgbe_enable_msix))
543                 error = ixgbe_allocate_msix(adapter); 
544         else
545                 error = ixgbe_allocate_legacy(adapter); 
546         if (error) 
547                 goto err_late;
548
549         /* Setup OS specific network interface */
550         if (ixgbe_setup_interface(dev, adapter) != 0)
551                 goto err_late;
552
553         /* Initialize statistics */
554         ixgbe_update_stats_counters(adapter);
555
556         /* Register for VLAN events */
557         adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
558             ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
559         adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
560             ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
561
562         /*
563         ** Check PCIE slot type/speed/width
564         */
565         ixgbe_get_slot_info(hw);
566
567
568         /* Set an initial default flow control value */
569         adapter->fc = ixgbe_fc_full;
570
571         /* let hardware know driver is loaded */
572         ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
573         ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
574         IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
575
576         ixgbe_add_hw_stats(adapter);
577
578 #ifdef DEV_NETMAP
579         ixgbe_netmap_attach(adapter);
580 #endif /* DEV_NETMAP */
581         INIT_DEBUGOUT("ixgbe_attach: end");
582         return (0);
583
584 err_late:
585         ixgbe_free_transmit_structures(adapter);
586         ixgbe_free_receive_structures(adapter);
587 err_out:
588         if (adapter->ifp != NULL)
589                 if_free(adapter->ifp);
590         ixgbe_free_pci_resources(adapter);
591         free(adapter->mta, M_DEVBUF);
592         return (error);
593 }
594
595 /*********************************************************************
596  *  Device removal routine
597  *
598  *  The detach entry point is called when the driver is being removed.
599  *  This routine stops the adapter and deallocates all the resources
600  *  that were allocated for driver operation.
601  *
602  *  return 0 on success, positive on failure
603  *********************************************************************/
604
605 static int
606 ixgbe_detach(device_t dev)
607 {
608         struct adapter *adapter = device_get_softc(dev);
609         struct ix_queue *que = adapter->queues;
610         struct tx_ring *txr = adapter->tx_rings;
611         u32     ctrl_ext;
612
613         INIT_DEBUGOUT("ixgbe_detach: begin");
614
615         /* Make sure VLANs are not using the driver */
616         if (adapter->ifp->if_vlantrunk != NULL) {
617                 device_printf(dev,"Vlan in use, detach first\n");
618                 return (EBUSY);
619         }
620
621         IXGBE_CORE_LOCK(adapter);
622         ixgbe_stop(adapter);
623         IXGBE_CORE_UNLOCK(adapter);
624
625         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
626                 if (que->tq) {
627 #ifndef IXGBE_LEGACY_TX
628                         taskqueue_drain(que->tq, &txr->txq_task);
629 #endif
630                         taskqueue_drain(que->tq, &que->que_task);
631                         taskqueue_free(que->tq);
632                 }
633         }
634
635         /* Drain the Link queue */
636         if (adapter->tq) {
637                 taskqueue_drain(adapter->tq, &adapter->link_task);
638                 taskqueue_drain(adapter->tq, &adapter->mod_task);
639                 taskqueue_drain(adapter->tq, &adapter->msf_task);
640 #ifdef IXGBE_FDIR
641                 taskqueue_drain(adapter->tq, &adapter->fdir_task);
642 #endif
643                 taskqueue_free(adapter->tq);
644         }
645
646         /* let hardware know driver is unloading */
647         ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
648         ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
649         IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
650
651         /* Unregister VLAN events */
652         if (adapter->vlan_attach != NULL)
653                 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
654         if (adapter->vlan_detach != NULL)
655                 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
656
657         ether_ifdetach(adapter->ifp);
658         callout_drain(&adapter->timer);
659 #ifdef DEV_NETMAP
660         netmap_detach(adapter->ifp);
661 #endif /* DEV_NETMAP */
662         ixgbe_free_pci_resources(adapter);
663         bus_generic_detach(dev);
664         if_free(adapter->ifp);
665
666         ixgbe_free_transmit_structures(adapter);
667         ixgbe_free_receive_structures(adapter);
668         free(adapter->mta, M_DEVBUF);
669
670         IXGBE_CORE_LOCK_DESTROY(adapter);
671         return (0);
672 }
673
674 /*********************************************************************
675  *
676  *  Shutdown entry point
677  *
678  **********************************************************************/
679
680 static int
681 ixgbe_shutdown(device_t dev)
682 {
683         struct adapter *adapter = device_get_softc(dev);
684         IXGBE_CORE_LOCK(adapter);
685         ixgbe_stop(adapter);
686         IXGBE_CORE_UNLOCK(adapter);
687         return (0);
688 }
689
690
691 /*********************************************************************
692  *  Ioctl entry point
693  *
694  *  ixgbe_ioctl is called when the user wants to configure the
695  *  interface.
696  *
697  *  return 0 on success, positive on failure
698  **********************************************************************/
699
700 static int
701 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
702 {
703         struct adapter  *adapter = ifp->if_softc;
704         struct ifreq    *ifr = (struct ifreq *) data;
705 #if defined(INET) || defined(INET6)
706         struct ifaddr *ifa = (struct ifaddr *)data;
707         bool            avoid_reset = FALSE;
708 #endif
709         int             error = 0;
710
711         switch (command) {
712
713         case SIOCSIFADDR:
714 #ifdef INET
715                 if (ifa->ifa_addr->sa_family == AF_INET)
716                         avoid_reset = TRUE;
717 #endif
718 #ifdef INET6
719                 if (ifa->ifa_addr->sa_family == AF_INET6)
720                         avoid_reset = TRUE;
721 #endif
722 #if defined(INET) || defined(INET6)
723                 /*
724                 ** Calling init results in link renegotiation,
725                 ** so we avoid doing it when possible.
726                 */
727                 if (avoid_reset) {
728                         ifp->if_flags |= IFF_UP;
729                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
730                                 ixgbe_init(adapter);
731                         if (!(ifp->if_flags & IFF_NOARP))
732                                 arp_ifinit(ifp, ifa);
733                 } else
734                         error = ether_ioctl(ifp, command, data);
735 #endif
736                 break;
737         case SIOCSIFMTU:
738                 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
739                 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
740                         error = EINVAL;
741                 } else {
742                         IXGBE_CORE_LOCK(adapter);
743                         ifp->if_mtu = ifr->ifr_mtu;
744                         adapter->max_frame_size =
745                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
746                         ixgbe_init_locked(adapter);
747                         IXGBE_CORE_UNLOCK(adapter);
748                 }
749                 break;
750         case SIOCSIFFLAGS:
751                 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
752                 IXGBE_CORE_LOCK(adapter);
753                 if (ifp->if_flags & IFF_UP) {
754                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
755                                 if ((ifp->if_flags ^ adapter->if_flags) &
756                                     (IFF_PROMISC | IFF_ALLMULTI)) {
757                                         ixgbe_set_promisc(adapter);
758                                 }
759                         } else
760                                 ixgbe_init_locked(adapter);
761                 } else
762                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
763                                 ixgbe_stop(adapter);
764                 adapter->if_flags = ifp->if_flags;
765                 IXGBE_CORE_UNLOCK(adapter);
766                 break;
767         case SIOCADDMULTI:
768         case SIOCDELMULTI:
769                 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
770                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
771                         IXGBE_CORE_LOCK(adapter);
772                         ixgbe_disable_intr(adapter);
773                         ixgbe_set_multi(adapter);
774                         ixgbe_enable_intr(adapter);
775                         IXGBE_CORE_UNLOCK(adapter);
776                 }
777                 break;
778         case SIOCSIFMEDIA:
779         case SIOCGIFMEDIA:
780                 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
781                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
782                 break;
783         case SIOCSIFCAP:
784         {
785                 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
786                 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
787                 if (mask & IFCAP_HWCSUM)
788                         ifp->if_capenable ^= IFCAP_HWCSUM;
789                 if (mask & IFCAP_TSO4)
790                         ifp->if_capenable ^= IFCAP_TSO4;
791                 if (mask & IFCAP_TSO6)
792                         ifp->if_capenable ^= IFCAP_TSO6;
793                 if (mask & IFCAP_LRO)
794                         ifp->if_capenable ^= IFCAP_LRO;
795                 if (mask & IFCAP_VLAN_HWTAGGING)
796                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
797                 if (mask & IFCAP_VLAN_HWFILTER)
798                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
799                 if (mask & IFCAP_VLAN_HWTSO)
800                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
801                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
802                         IXGBE_CORE_LOCK(adapter);
803                         ixgbe_init_locked(adapter);
804                         IXGBE_CORE_UNLOCK(adapter);
805                 }
806                 VLAN_CAPABILITIES(ifp);
807                 break;
808         }
809 #if __FreeBSD_version >= 1100036
810         case SIOCGI2C:
811         {
812                 struct ixgbe_hw *hw = &adapter->hw;
813                 struct ifi2creq i2c;
814                 int i;
815                 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
816                 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
817                 if (error != 0)
818                         break;
819                 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
820                         error = EINVAL;
821                         break;
822                 }
823                 if (i2c.len > sizeof(i2c.data)) {
824                         error = EINVAL;
825                         break;
826                 }
827
828                 for (i = 0; i < i2c.len; i++)
829                         hw->phy.ops.read_i2c_byte(hw, i2c.offset + i,
830                             i2c.dev_addr, &i2c.data[i]);
831                 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
832                 break;
833         }
834 #endif
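        /*
        ** For reference: dev_addr 0xA0 selects the SFP+ module's base
        ** ID EEPROM and 0xA2 its diagnostics page (per SFF-8472). A
        ** userland consumer would typically do something like the
        ** following (illustrative sketch, not a formal ABI contract):
        **
        **      struct ifi2creq i2c = { .dev_addr = 0xA0, .offset = 0,
        **          .len = 8 };
        **      struct ifreq ifr;
        **      strlcpy(ifr.ifr_name, "ix0", sizeof(ifr.ifr_name));
        **      ifr.ifr_data = (caddr_t)&i2c;
        **      ioctl(sock, SIOCGI2C, &ifr);
        */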
835         default:
836                 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
837                 error = ether_ioctl(ifp, command, data);
838                 break;
839         }
840
841         return (error);
842 }
843
844 /*********************************************************************
845  *  Init entry point
846  *
847  *  This routine is used in two ways. It is used by the stack as
848  *  init entry point in network interface structure. It is also used
849  *  by the driver as a hw/sw initialization routine to get to a
850  *  consistent state.
851  *
852  *  return 0 on success, positive on failure
853  **********************************************************************/
854 #define IXGBE_MHADD_MFS_SHIFT 16
855
856 static void
857 ixgbe_init_locked(struct adapter *adapter)
858 {
859         struct ifnet   *ifp = adapter->ifp;
860         device_t        dev = adapter->dev;
861         struct ixgbe_hw *hw = &adapter->hw;
862         u32             k, txdctl, mhadd, gpie;
863         u32             rxdctl, rxctrl;
864
865         mtx_assert(&adapter->core_mtx, MA_OWNED);
866         INIT_DEBUGOUT("ixgbe_init_locked: begin");
867         hw->adapter_stopped = FALSE;
868         ixgbe_stop_adapter(hw);
869         callout_stop(&adapter->timer);
870
871         /* reprogram the RAR[0] in case user changed it. */
872         ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
873
874         /* Get the latest MAC address; the user may have set a LAA */
875         bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
876               IXGBE_ETH_LENGTH_OF_ADDRESS);
877         ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
878         hw->addr_ctrl.rar_used_count = 1;
879
880         /* Set the various hardware offload abilities */
881         ifp->if_hwassist = 0;
882         if (ifp->if_capenable & IFCAP_TSO)
883                 ifp->if_hwassist |= CSUM_TSO;
884         if (ifp->if_capenable & IFCAP_TXCSUM) {
885                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
886 #if __FreeBSD_version >= 800000
887                 if (hw->mac.type != ixgbe_mac_82598EB)
888                         ifp->if_hwassist |= CSUM_SCTP;
889 #endif
890         }
891
892         /* Prepare transmit descriptors and buffers */
893         if (ixgbe_setup_transmit_structures(adapter)) {
894                 device_printf(dev,"Could not setup transmit structures\n");
895                 ixgbe_stop(adapter);
896                 return;
897         }
898
899         ixgbe_init_hw(hw);
900         ixgbe_initialize_transmit_units(adapter);
901
902         /* Setup Multicast table */
903         ixgbe_set_multi(adapter);
904
905         /*
906         ** Determine the correct mbuf pool
907         ** for doing jumbo frames
908         */
909         if (adapter->max_frame_size <= 2048)
910                 adapter->rx_mbuf_sz = MCLBYTES;
911         else if (adapter->max_frame_size <= 4096)
912                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
913         else if (adapter->max_frame_size <= 9216)
914                 adapter->rx_mbuf_sz = MJUM9BYTES;
915         else
916                 adapter->rx_mbuf_sz = MJUM16BYTES;
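
        /*
        ** Example (assumed MTU): a 9000-byte MTU gives a
        ** max_frame_size of 9000 + 14 (Ethernet header) + 4 (CRC)
        ** = 9018, which selects the MJUM9BYTES (9KB) cluster pool
        ** above.
        */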
917
918         /* Prepare receive descriptors and buffers */
919         if (ixgbe_setup_receive_structures(adapter)) {
920                 device_printf(dev,"Could not setup receive structures\n");
921                 ixgbe_stop(adapter);
922                 return;
923         }
924
925         /* Configure RX settings */
926         ixgbe_initialize_receive_units(adapter);
927
928         gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
929
930         /* Enable Fan Failure Interrupt */
931         gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
932
933         /* Add for Module detection */
934         if (hw->mac.type == ixgbe_mac_82599EB)
935                 gpie |= IXGBE_SDP2_GPIEN_BY_MAC(hw);
936
937         /* Thermal Failure Detection */
938         if (hw->mac.type == ixgbe_mac_X540)
939                 gpie |= IXGBE_SDP0_GPIEN_BY_MAC(hw);
940
941         if (adapter->msix > 1) {
942                 /* Enable Enhanced MSIX mode */
943                 gpie |= IXGBE_GPIE_MSIX_MODE;
944                 gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
945                     IXGBE_GPIE_OCD;
946         }
947         IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
948
949         /* Set MTU size */
950         if (ifp->if_mtu > ETHERMTU) {
951                 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
952                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
953                 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
954                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
955         }
956         
957         /* Now enable all the queues */
958
959         for (int i = 0; i < adapter->num_queues; i++) {
960                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
961                 txdctl |= IXGBE_TXDCTL_ENABLE;
962                 /* Set WTHRESH to 8, burst writeback */
963                 txdctl |= (8 << 16);
964                 /*
965                  * When the internal queue falls below PTHRESH (32),
966                  * start prefetching as long as there are at least
967                  * HTHRESH (1) buffers ready. The values are taken
968                  * from the Intel linux driver 3.8.21.
969                  * Prefetching enables tx line rate even with 1 queue.
970                  */
971                 txdctl |= (32 << 0) | (1 << 8);
972                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
973         }
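
        /*
        ** Field layout, as the shifts above indicate: TXDCTL packs
        ** PTHRESH in bits 6:0, HTHRESH in bits 14:8 and WTHRESH in
        ** bits 22:16, so these writes program PTHRESH=32, HTHRESH=1
        ** and WTHRESH=8 for each transmit queue.
        */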
974
975         for (int i = 0; i < adapter->num_queues; i++) {
976                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
977                 if (hw->mac.type == ixgbe_mac_82598EB) {
978                         /*
979                         ** PTHRESH = 21
980                         ** HTHRESH = 4
981                         ** WTHRESH = 8
982                         */
983                         rxdctl &= ~0x3FFFFF;
984                         rxdctl |= 0x080420;
985                 }
986                 rxdctl |= IXGBE_RXDCTL_ENABLE;
987                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
988                 for (k = 0; k < 10; k++) {
989                         if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
990                             IXGBE_RXDCTL_ENABLE)
991                                 break;
992                         else
993                                 msec_delay(1);
994                 }
995                 wmb();
996 #ifdef DEV_NETMAP
997                 /*
998                  * In netmap mode, we must preserve the buffers made
999                  * available to userspace before the if_init()
1000                  * (this is true by default on the TX side, because
1001                  * init makes all buffers available to userspace).
1002                  *
1003                  * netmap_reset() and the device specific routines
1004                  * (e.g. ixgbe_setup_receive_rings()) map these
1005                  * buffers at the end of the NIC ring, so here we
1006                  * must set the RDT (tail) register to make sure
1007                  * they are not overwritten.
1008                  *
1009                  * In this driver the NIC ring starts at RDH = 0,
1010                  * RDT points to the last slot available for reception (?),
1011                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1012                  */
1013                 if (ifp->if_capenable & IFCAP_NETMAP) {
1014                         struct netmap_adapter *na = NA(adapter->ifp);
1015                         struct netmap_kring *kring = &na->rx_rings[i];
1016                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1017
1018                         IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
1019                 } else
1020 #endif /* DEV_NETMAP */
1021                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
1022         }
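
        /*
        ** Example (assumed ring size): with num_rx_desc = 2048 the
        ** non-netmap path writes RDT = 2047, handing the entire ring
        ** to hardware, whereas the netmap path pulls the tail back by
        ** nm_kr_rxspace() so buffers still owned by userspace are not
        ** overwritten.
        */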
1023
1024         /* Enable Receive engine */
1025         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1026         if (hw->mac.type == ixgbe_mac_82598EB)
1027                 rxctrl |= IXGBE_RXCTRL_DMBYPS;
1028         rxctrl |= IXGBE_RXCTRL_RXEN;
1029         ixgbe_enable_rx_dma(hw, rxctrl);
1030
1031         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1032
1033         /* Set up MSI/X routing */
1034         if (ixgbe_enable_msix)  {
1035                 ixgbe_configure_ivars(adapter);
1036                 /* Set up auto-mask */
1037                 if (hw->mac.type == ixgbe_mac_82598EB)
1038                         IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1039                 else {
1040                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
1041                         IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
1042                 }
1043         } else {  /* Simple settings for Legacy/MSI */
1044                 ixgbe_set_ivar(adapter, 0, 0, 0);
1045                 ixgbe_set_ivar(adapter, 0, 0, 1);
1046                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1047         }
1048
1049 #ifdef IXGBE_FDIR
1050         /* Init Flow director */
1051         if (hw->mac.type != ixgbe_mac_82598EB) {
1052                 u32 hdrm = 32 << fdir_pballoc;
1053
1054                 hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
1055                 ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
1056         }
1057 #endif
1058
1059         /*
1060         ** Check on any SFP devices that
1061         ** need to be kick-started
1062         */
1063         if (hw->phy.type == ixgbe_phy_none) {
1064                 int err = hw->phy.ops.identify(hw);
1065                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1066                         device_printf(dev,
1067                             "Unsupported SFP+ module type was detected.\n");
1068                         return;
1069                 }
1070         }
1071
1072         /* Set moderation on the Link interrupt */
1073         IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
1074
1075         /* Config/Enable Link */
1076         ixgbe_config_link(adapter);
1077
1078         /* Hardware Packet Buffer & Flow Control setup */
1079         {
1080                 u32 rxpb, frame, size, tmp;
1081
1082                 frame = adapter->max_frame_size;
1083
1084                 /* Calculate High Water */
1085                 switch (hw->mac.type) {
1086                 case ixgbe_mac_X540:
1087                 case ixgbe_mac_X550:
1088                 case ixgbe_mac_X550EM_a:
1089                 case ixgbe_mac_X550EM_x:
1090                         tmp = IXGBE_DV_X540(frame, frame);
1091                         break;
1092                 default:
1093                         tmp = IXGBE_DV(frame, frame);
1094                         break;
1095                 }
1096                 size = IXGBE_BT2KB(tmp);
1097                 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1098                 hw->fc.high_water[0] = rxpb - size;
1099
1100                 /* Now calculate Low Water */
1101                 switch (hw->mac.type) {
1102                 case ixgbe_mac_X540:
1103                 case ixgbe_mac_X550:
1104                 case ixgbe_mac_X550EM_a:
1105                 case ixgbe_mac_X550EM_x:
1106                         tmp = IXGBE_LOW_DV_X540(frame);
1107                         break;
1108                 default:
1109                         tmp = IXGBE_LOW_DV(frame);
1110                         break;
1111                 }
1112                 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
1113                 
1114                 hw->fc.requested_mode = adapter->fc;
1115                 hw->fc.pause_time = IXGBE_FC_PAUSE;
1116                 hw->fc.send_xon = TRUE;
1117         }
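
        /*
        ** Roughly, per the code above: the high-water mark is the
        ** queue-0 packet buffer size (RXPBSIZE, converted to KB)
        ** minus the delay value needed to absorb one maximum-sized
        ** frame in flight, and the low-water mark is the matching
        ** resume threshold; both are derived from max_frame_size via
        ** the IXGBE_DV() and IXGBE_LOW_DV() macro families.
        */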
1118         /* Initialize the FC settings */
1119         ixgbe_start_hw(hw);
1120
1121         /* Set up VLAN support and filter */
1122         ixgbe_setup_vlan_hw_support(adapter);
1123
1124         /* And now turn on interrupts */
1125         ixgbe_enable_intr(adapter);
1126
1127         /* Now inform the stack we're ready */
1128         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1129
1130         return;
1131 }
1132
1133 static void
1134 ixgbe_init(void *arg)
1135 {
1136         struct adapter *adapter = arg;
1137
1138         IXGBE_CORE_LOCK(adapter);
1139         ixgbe_init_locked(adapter);
1140         IXGBE_CORE_UNLOCK(adapter);
1141         return;
1142 }
1143
1144
1145 /*
1146 **
1147 ** MSIX Interrupt Handlers and Tasklets
1148 **
1149 */
1150
1151 static inline void
1152 ixgbe_enable_queue(struct adapter *adapter, u32 vector)
1153 {
1154         struct ixgbe_hw *hw = &adapter->hw;
1155         u64     queue = (u64)(1 << vector);
1156         u32     mask;
1157
1158         if (hw->mac.type == ixgbe_mac_82598EB) {
1159                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1160                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1161         } else {
1162                 mask = (queue & 0xFFFFFFFF);
1163                 if (mask)
1164                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1165                 mask = (queue >> 32);
1166                 if (mask)
1167                         IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1168         }
1169 }
1170
1171 static inline void
1172 ixgbe_disable_queue(struct adapter *adapter, u32 vector)
1173 {
1174         struct ixgbe_hw *hw = &adapter->hw;
1175         u64     queue = (u64)(1 << vector);
1176         u32     mask;
1177
1178         if (hw->mac.type == ixgbe_mac_82598EB) {
1179                 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1180                 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1181         } else {
1182                 mask = (queue & 0xFFFFFFFF);
1183                 if (mask)
1184                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1185                 mask = (queue >> 32);
1186                 if (mask)
1187                         IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1188         }
1189 }
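
/*
** Note on the register split used above: the 82598 exposes a single
** 32-bit EIMS/EIMC pair, while 82599 and later carry the per-queue
** interrupt mask in the 64-bit EIMS_EX/EIMC_EX register pairs, with
** EX(0) covering the low 32 queue bits and EX(1) the high 32; hence
** the two code paths in ixgbe_enable_queue() and ixgbe_disable_queue().
*/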
1190
1191 static void
1192 ixgbe_handle_que(void *context, int pending)
1193 {
1194         struct ix_queue *que = context;
1195         struct adapter  *adapter = que->adapter;
1196         struct tx_ring  *txr = que->txr;
1197         struct ifnet    *ifp = adapter->ifp;
1198
1199         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1200                 ixgbe_rxeof(que);
1201                 IXGBE_TX_LOCK(txr);
1202                 ixgbe_txeof(txr);
1203 #ifndef IXGBE_LEGACY_TX
1204                 if (!drbr_empty(ifp, txr->br))
1205                         ixgbe_mq_start_locked(ifp, txr);
1206 #else
1207                 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1208                         ixgbe_start_locked(txr, ifp);
1209 #endif
1210                 IXGBE_TX_UNLOCK(txr);
1211         }
1212
1213         /* Reenable this interrupt */
1214         if (que->res != NULL)
1215                 ixgbe_enable_queue(adapter, que->msix);
1216         else
1217                 ixgbe_enable_intr(adapter);
1218         return;
1219 }
1220
1221
1222 /*********************************************************************
1223  *
1224  *  Legacy Interrupt Service routine
1225  *
1226  **********************************************************************/
1227
1228 static void
1229 ixgbe_legacy_irq(void *arg)
1230 {
1231         struct ix_queue *que = arg;
1232         struct adapter  *adapter = que->adapter;
1233         struct ixgbe_hw *hw = &adapter->hw;
1234         struct ifnet    *ifp = adapter->ifp;
1235         struct          tx_ring *txr = adapter->tx_rings;
1236         bool            more;
1237         u32             reg_eicr;
1238
1239
1240         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1241
1242         ++que->irqs;
1243         if (reg_eicr == 0) {
1244                 ixgbe_enable_intr(adapter);
1245                 return;
1246         }
1247
1248         more = ixgbe_rxeof(que);
1249
1250         IXGBE_TX_LOCK(txr);
1251         ixgbe_txeof(txr);
1252 #ifdef IXGBE_LEGACY_TX
1253         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1254                 ixgbe_start_locked(txr, ifp);
1255 #else
1256         if (!drbr_empty(ifp, txr->br))
1257                 ixgbe_mq_start_locked(ifp, txr);
1258 #endif
1259         IXGBE_TX_UNLOCK(txr);
1260
1261         /* Check for fan failure */
1262         if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1263             (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1264                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1265                     "REPLACE IMMEDIATELY!!\n");
1266                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1267         }
1268
1269         /* Link status change */
1270         if (reg_eicr & IXGBE_EICR_LSC)
1271                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1272
1273         if (more)
1274                 taskqueue_enqueue(que->tq, &que->que_task);
1275         else
1276                 ixgbe_enable_intr(adapter);
1277         return;
1278 }
1279
1280
1281 /*********************************************************************
1282  *
1283  *  MSIX Queue Interrupt Service routine
1284  *
1285  **********************************************************************/
1286 void
1287 ixgbe_msix_que(void *arg)
1288 {
1289         struct ix_queue *que = arg;
1290         struct adapter  *adapter = que->adapter;
1291         struct ifnet    *ifp = adapter->ifp;
1292         struct tx_ring  *txr = que->txr;
1293         struct rx_ring  *rxr = que->rxr;
1294         bool            more;
1295         u32             newitr = 0;
1296
1297         /* Protect against spurious interrupts */
1298         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1299                 return;
1300
1301         ixgbe_disable_queue(adapter, que->msix);
1302         ++que->irqs;
1303
1304         more = ixgbe_rxeof(que);
1305
1306         IXGBE_TX_LOCK(txr);
1307         ixgbe_txeof(txr);
1308 #ifdef IXGBE_LEGACY_TX
1309         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1310                 ixgbe_start_locked(txr, ifp);
1311 #else
1312         if (!drbr_empty(ifp, txr->br))
1313                 ixgbe_mq_start_locked(ifp, txr);
1314 #endif
1315         IXGBE_TX_UNLOCK(txr);
1316
1317         /* Do AIM now? */
1318
1319         if (ixgbe_enable_aim == FALSE)
1320                 goto no_calc;
1321         /*
1322         ** Do Adaptive Interrupt Moderation:
1323         **  - Write out last calculated setting
1324         **  - Calculate based on average size over
1325         **    the last interval.
1326         */
1327         if (que->eitr_setting)
1328                 IXGBE_WRITE_REG(&adapter->hw,
1329                     IXGBE_EITR(que->msix), que->eitr_setting);
1330  
1331         que->eitr_setting = 0;
1332
1333         /* Idle, do nothing */
1334         if ((txr->bytes == 0) && (rxr->bytes == 0))
1335                 goto no_calc;
1336                                 
1337         if ((txr->bytes) && (txr->packets))
1338                 newitr = txr->bytes/txr->packets;
1339         if ((rxr->bytes) && (rxr->packets))
1340                 newitr = max(newitr,
1341                     (rxr->bytes / rxr->packets));
1342         newitr += 24; /* account for hardware frame, crc */
1343
1344         /* set an upper boundary */
1345         newitr = min(newitr, 3000);
1346
1347         /* Be nice to the mid range */
1348         if ((newitr > 300) && (newitr < 1200))
1349                 newitr = (newitr / 3);
1350         else
1351                 newitr = (newitr / 2);
1352
1353         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1354                 newitr |= newitr << 16;
1355         else
1356                 newitr |= IXGBE_EITR_CNT_WDIS;
1357                  
1358         /* save for next interrupt */
1359         que->eitr_setting = newitr;
1360
1361         /* Reset state */
1362         txr->bytes = 0;
1363         txr->packets = 0;
1364         rxr->bytes = 0;
1365         rxr->packets = 0;
1366
1367 no_calc:
1368         if (more)
1369                 taskqueue_enqueue(que->tq, &que->que_task);
1370         else
1371                 ixgbe_enable_queue(adapter, que->msix);
1372         return;
1373 }
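
/*
** AIM illustration (traffic numbers assumed): if a queue averaged
** 1500-byte frames over the last interval, newitr becomes
** 1500 + 24 = 1524, is capped at 3000, and is then halved to 762
** since it falls outside the 300-1200 mid range; after the
** mac-specific bits are OR'd in, the result is saved in eitr_setting
** and written to EITR at the start of the next queue interrupt.
*/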
1374
1375
1376 static void
1377 ixgbe_msix_link(void *arg)
1378 {
1379         struct adapter  *adapter = arg;
1380         struct ixgbe_hw *hw = &adapter->hw;
1381         u32             reg_eicr;
1382
1383         ++adapter->link_irq;
1384
1385         /* First get the cause */
1386         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1387         /* Be sure the queue bits are not cleared */
1388         reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
1389         /* Clear interrupt with write */
1390         IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1391
1392         /* Link status change */
1393         if (reg_eicr & IXGBE_EICR_LSC)
1394                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1395
1396         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1397 #ifdef IXGBE_FDIR
1398                 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1399                         /* This is probably overkill :) */
1400                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1401                                 return;
1402                         /* Disable the interrupt */
1403                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1404                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1405                 } else
1406 #endif
1407                 if (reg_eicr & IXGBE_EICR_ECC) {
1408                         device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1409                             "Please Reboot!!\n");
1410                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1411                 } else
1412
1413                 if (ixgbe_is_sfp(hw)) {
1414                         if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1415                                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1416                                 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1417                         } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1418                                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
1419                                 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1420                         }
1421                 }
1422         } 
1423
1424         /* Check for fan failure */
1425         if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1426             (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
1427                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1428                     "REPLACE IMMEDIATELY!!\n");
1429                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
1430         }
1431
1432         /* Check for over temp condition */
1433         switch (hw->mac.type) {
1434         case ixgbe_mac_X540:
1435         case ixgbe_mac_X550:
1436         case ixgbe_mac_X550EM_a:
1437                 if (reg_eicr & IXGBE_EICR_TS) {
1438                         device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1439                             "PHY IS SHUT DOWN!!\n");
1440                         device_printf(adapter->dev, "System shutdown required\n");
1441                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1442                 }
1443                 break;
1444         default:
1445                 /* Other MACs have no thermal sensor interrupt */
1446                 break;
1447         }
1448
1449         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1450         return;
1451 }
1452
1453 /*********************************************************************
1454  *
1455  *  Media Ioctl callback
1456  *
1457  *  This routine is called whenever the user queries the status of
1458  *  the interface using ifconfig.
1459  *
1460  **********************************************************************/
1461 static void
1462 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1463 {
1464         struct adapter *adapter = ifp->if_softc;
1465         struct ixgbe_hw *hw = &adapter->hw;
1466         int layer;
1467
1468         INIT_DEBUGOUT("ixgbe_media_status: begin");
1469         IXGBE_CORE_LOCK(adapter);
1470         ixgbe_update_link_status(adapter);
1471
1472         ifmr->ifm_status = IFM_AVALID;
1473         ifmr->ifm_active = IFM_ETHER;
1474
1475         if (!adapter->link_active) {
1476                 IXGBE_CORE_UNLOCK(adapter);
1477                 return;
1478         }
1479
1480         ifmr->ifm_status |= IFM_ACTIVE;
1481         layer = ixgbe_get_supported_physical_layer(hw);
1482
1483         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1484             layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1485             layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1486                 switch (adapter->link_speed) {
1487                 case IXGBE_LINK_SPEED_10GB_FULL:
1488                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1489                         break;
1490                 case IXGBE_LINK_SPEED_1GB_FULL:
1491                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1492                         break;
1493                 case IXGBE_LINK_SPEED_100_FULL:
1494                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1495                         break;
1496                 }
1497         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1498             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1499                 switch (adapter->link_speed) {
1500                 case IXGBE_LINK_SPEED_10GB_FULL:
1501                         ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
1502                         break;
1503                 }
1504         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
1505                 switch (adapter->link_speed) {
1506                 case IXGBE_LINK_SPEED_10GB_FULL:
1507                         ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1508                         break;
1509                 case IXGBE_LINK_SPEED_1GB_FULL:
1510                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1511                         break;
1512                 }
1513         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1514                 switch (adapter->link_speed) {
1515                 case IXGBE_LINK_SPEED_10GB_FULL:
1516                         ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
1517                         break;
1518                 case IXGBE_LINK_SPEED_1GB_FULL:
1519                         ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1520                         break;
1521                 }
1522         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1523             layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1524                 switch (adapter->link_speed) {
1525                 case IXGBE_LINK_SPEED_10GB_FULL:
1526                         ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1527                         break;
1528                 case IXGBE_LINK_SPEED_1GB_FULL:
1529                         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1530                         break;
1531                 }
1532         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1533                 switch (adapter->link_speed) {
1534                 case IXGBE_LINK_SPEED_10GB_FULL:
1535                         ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1536                         break;
1537                 }
1538         /*
1539         ** XXX: These need to use the proper media types once
1540         ** they're added.
1541         */
1542         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1543                 switch (adapter->link_speed) {
1544                 case IXGBE_LINK_SPEED_10GB_FULL:
1545                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1546                         break;
1547                 case IXGBE_LINK_SPEED_1GB_FULL:
1548                         ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
1549                         break;
1550                 }
1551         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4
1552             || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1553                 switch (adapter->link_speed) {
1554                 case IXGBE_LINK_SPEED_10GB_FULL:
1555                         ifmr->ifm_active |= IFM_10_2 | IFM_FDX;
1556                         break;
1557                 case IXGBE_LINK_SPEED_1GB_FULL:
1558                         ifmr->ifm_active |= IFM_10_5 | IFM_FDX;
1559                         break;
1560                 }
1561         
1562         /* If nothing is recognized... */
1563         if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
1564                 ifmr->ifm_active |= IFM_UNKNOWN;
1565         
1566 #if __FreeBSD_version >= 900025
1567         /* Flow control setting */
1568         if (adapter->fc == ixgbe_fc_rx_pause || adapter->fc == ixgbe_fc_full)
1569                 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
1570         if (adapter->fc == ixgbe_fc_tx_pause || adapter->fc == ixgbe_fc_full)
1571                 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
1572 #endif
1573
1574         IXGBE_CORE_UNLOCK(adapter);
1575
1576         return;
1577 }
1578
1579 /*********************************************************************
1580  *
1581  *  Media Ioctl callback
1582  *
1583  *  This routine is called when the user changes speed/duplex using
1584  *  media/mediaopt options with ifconfig.
1585  *
1586  **********************************************************************/
1587 static int
1588 ixgbe_media_change(struct ifnet * ifp)
1589 {
1590         struct adapter *adapter = ifp->if_softc;
1591         struct ifmedia *ifm = &adapter->media;
1592         struct ixgbe_hw *hw = &adapter->hw;
1593         ixgbe_link_speed speed = 0;
1594
1595         INIT_DEBUGOUT("ixgbe_media_change: begin");
1596
1597         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1598                 return (EINVAL);
1599
1600         /*
1601         ** We don't actually need to check against the supported
1602         ** media types of the adapter; ifmedia will take care of
1603         ** that for us.
1604         **      NOTE: this relies on falling through the switch
1605         **      to get all the values set; it can be confusing.
1606         */
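        /*
        ** Worked example of the fall-through (illustration only):
        ** selecting 10Gbase-T enters at IFM_10G_T and falls through
        ** the 1G and 10G cases, so speed accumulates 100M | 1G | 10G;
        ** 10Gbase-SR yields 1G | 10G; 1000base-T yields 100M | 1G;
        ** plain 100baseTX sets only the 100M bit.
        */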
1607         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1608                 case IFM_AUTO:
1609                 case IFM_10G_T:
1610                         speed |= IXGBE_LINK_SPEED_100_FULL;
1611                 case IFM_10G_LRM:
1612                 case IFM_10G_SR:  /* KR, too */
1613                 case IFM_10G_LR:
1614                 case IFM_10G_CX4: /* KX4 for now */
1615                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1616                 case IFM_10G_TWINAX:
1617                         speed |= IXGBE_LINK_SPEED_10GB_FULL;
1618                         break;
1619                 case IFM_1000_T:
1620                         speed |= IXGBE_LINK_SPEED_100_FULL;
1621                 case IFM_1000_LX:
1622                 case IFM_1000_SX:
1623                 case IFM_1000_CX: /* KX until there's real support */
1624                         speed |= IXGBE_LINK_SPEED_1GB_FULL;
1625                         break;
1626                 case IFM_100_TX:
1627                         speed |= IXGBE_LINK_SPEED_100_FULL;
1628                         break;
1629                 default:
1630                         goto invalid;
1631         }
1632
1633         hw->mac.autotry_restart = TRUE;
1634         hw->mac.ops.setup_link(hw, speed, TRUE);
1635         adapter->advertise =
1636                 ((speed & IXGBE_LINK_SPEED_10GB_FULL) << 2) |
1637                 ((speed & IXGBE_LINK_SPEED_1GB_FULL) << 1) |
1638                 ((speed & IXGBE_LINK_SPEED_100_FULL) << 0);
1639
1640         return (0);
1641
1642 invalid:
1643         device_printf(adapter->dev, "Invalid media type\n");
1644         return (EINVAL);
1645 }
1646
1647 static void
1648 ixgbe_set_promisc(struct adapter *adapter)
1649 {
1650         u_int32_t       reg_rctl;
1651         struct ifnet   *ifp = adapter->ifp;
1652         int             mcnt = 0;
1653
1654         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1655         reg_rctl &= (~IXGBE_FCTRL_UPE);
1656         if (ifp->if_flags & IFF_ALLMULTI)
1657                 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1658         else {
1659                 struct  ifmultiaddr *ifma;
1660 #if __FreeBSD_version < 800000
1661                 IF_ADDR_LOCK(ifp);
1662 #else
1663                 if_maddr_rlock(ifp);
1664 #endif
1665                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1666                         if (ifma->ifma_addr->sa_family != AF_LINK)
1667                                 continue;
1668                         if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1669                                 break;
1670                         mcnt++;
1671                 }
1672 #if __FreeBSD_version < 800000
1673                 IF_ADDR_UNLOCK(ifp);
1674 #else
1675                 if_maddr_runlock(ifp);
1676 #endif
1677         }
1678         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
1679                 reg_rctl &= (~IXGBE_FCTRL_MPE);
1680         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1681
1682         if (ifp->if_flags & IFF_PROMISC) {
1683                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1684                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1685         } else if (ifp->if_flags & IFF_ALLMULTI) {
1686                 reg_rctl |= IXGBE_FCTRL_MPE;
1687                 reg_rctl &= ~IXGBE_FCTRL_UPE;
1688                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1689         }
1690         return;
1691 }
1692
1693
1694 /*********************************************************************
1695  *  Multicast Update
1696  *
1697  *  This routine is called whenever multicast address list is updated.
1698  *
1699  **********************************************************************/
1700 #define IXGBE_RAR_ENTRIES 16
1701
1702 static void
1703 ixgbe_set_multi(struct adapter *adapter)
1704 {
1705         u32     fctrl;
1706         u8      *mta;
1707         u8      *update_ptr;
1708         struct  ifmultiaddr *ifma;
1709         int     mcnt = 0;
1710         struct ifnet   *ifp = adapter->ifp;
1711
1712         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1713
1714         mta = adapter->mta;
1715         bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1716             MAX_NUM_MULTICAST_ADDRESSES);
1717
1718 #if __FreeBSD_version < 800000
1719         IF_ADDR_LOCK(ifp);
1720 #else
1721         if_maddr_rlock(ifp);
1722 #endif
1723         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1724                 if (ifma->ifma_addr->sa_family != AF_LINK)
1725                         continue;
1726                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1727                         break;
1728                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1729                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1730                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1731                 mcnt++;
1732         }
1733 #if __FreeBSD_version < 800000
1734         IF_ADDR_UNLOCK(ifp);
1735 #else
1736         if_maddr_runlock(ifp);
1737 #endif
1738
1739         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1740         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1741         if (ifp->if_flags & IFF_PROMISC)
1742                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1743         else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
1744             ifp->if_flags & IFF_ALLMULTI) {
1745                 fctrl |= IXGBE_FCTRL_MPE;
1746                 fctrl &= ~IXGBE_FCTRL_UPE;
1747         } else
1748                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1749         
1750         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1751
1752         if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
1753                 update_ptr = mta;
1754                 ixgbe_update_mc_addr_list(&adapter->hw,
1755                     update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1756         }
1757
1758         return;
1759 }
1760
1761 /*
1762  * This is an iterator function now needed by the multicast
1763  * shared code. It simply feeds the shared code routine the
1764  * addresses in the array built by ixgbe_set_multi(), one at a time.
1765  */
1766 static u8 *
1767 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1768 {
1769         u8 *addr = *update_ptr;
1770         u8 *newptr;
1771         *vmdq = 0;
1772
1773         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1774         *update_ptr = newptr;
1775         return addr;
1776 }
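/*
 * Sketch of an assumed consumer (not the shared-code source): a routine
 * such as ixgbe_update_mc_addr_list() is expected to walk the list by
 * calling the iterator once per address, roughly:
 *
 *	u8 *p = mc_addr_list;
 *	for (u32 n = 0; n < mc_addr_count; n++) {
 *		u32 vmdq;
 *		u8 *addr = next(hw, &p, &vmdq);
 *		... program addr into the multicast table ...
 *	}
 *
 * which is why the iterator advances *update_ptr by exactly one MAC
 * address (IXGBE_ETH_LENGTH_OF_ADDRESS bytes) per call.
 */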
1777
1778
1779 /*********************************************************************
1780  *  Timer routine
1781  *
1782  *  This routine checks for link status, updates statistics,
1783  *  and runs the watchdog check.
1784  *
1785  **********************************************************************/
1786
1787 static void
1788 ixgbe_local_timer(void *arg)
1789 {
1790         struct adapter  *adapter = arg;
1791         device_t        dev = adapter->dev;
1792         struct ix_queue *que = adapter->queues;
1793         u64             queues = 0;
1794         int             hung = 0;
1795
1796         mtx_assert(&adapter->core_mtx, MA_OWNED);
1797
1798         /* Check for pluggable optics */
1799         if (adapter->sfp_probe)
1800                 if (!ixgbe_sfp_probe(adapter))
1801                         goto out; /* Nothing to do */
1802
1803         ixgbe_update_link_status(adapter);
1804         ixgbe_update_stats_counters(adapter);
1805
1806         /*
1807         ** Check the TX queues status
1808         **      - mark hung queues so we don't schedule on them
1809         **      - watchdog only if all queues show hung
1810         */          
1811         for (int i = 0; i < adapter->num_queues; i++, que++) {
1812                 /* Keep track of queues with work for soft irq */
1813                 if (que->txr->busy)
1814                         queues |= ((u64)1 << que->me);
1815                 /*
1816                 ** Each time txeof runs without cleaning while there
1817                 ** are uncleaned descriptors, it increments busy. If
1818                 ** we reach the MAX we declare the queue hung.
1819                 */
1820                 if (que->busy == IXGBE_QUEUE_HUNG) {
1821                         ++hung;
1822                         /* Mark the queue as inactive */
1823                         adapter->active_queues &= ~((u64)1 << que->me);
1824                         continue;
1825                 } else {
1826                         /* Check if we've come back from hung */
1827                         if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1828                                 adapter->active_queues |= ((u64)1 << que->me);
1829                 }
1830                 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1831                         device_printf(dev,"Warning queue %d "
1832                             "appears to be hung!\n", i);
1833                         que->txr->busy = IXGBE_QUEUE_HUNG;
1834                         ++hung;
1835                 }
1836
1837         }
1838
1839         /* Only truly watchdog if all queues show hung */
1840         if (hung == adapter->num_queues)
1841                 goto watchdog;
1842         else if (queues != 0) { /* Force an IRQ on queues with work */
1843                 ixgbe_rearm_queues(adapter, queues);
1844         }
1845
1846 out:
1847         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1848         return;
1849
1850 watchdog:
1851         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1852         adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1853         adapter->watchdog_events++;
1854         ixgbe_init_locked(adapter);
1855 }
1856
1857 /*
1858 ** Note: this routine updates the OS on the link state;
1859 **      the real check of the hardware only happens with
1860 **      a link interrupt.
1861 */
1862 static void
1863 ixgbe_update_link_status(struct adapter *adapter)
1864 {
1865         struct ifnet    *ifp = adapter->ifp;
1866         device_t dev = adapter->dev;
1867
1868
1869         if (adapter->link_up){ 
1870                 if (adapter->link_active == FALSE) {
1871                         if (bootverbose)
1872                                 device_printf(dev,"Link is up %d Gbps %s \n",
1873                                     ((adapter->link_speed == 128)? 10:1),
1874                                     "Full Duplex");
1875                         adapter->link_active = TRUE;
1876                         /* Update any Flow Control changes */
1877                         ixgbe_fc_enable(&adapter->hw);
1878                         if_link_state_change(ifp, LINK_STATE_UP);
1879                 }
1880         } else { /* Link down */
1881                 if (adapter->link_active == TRUE) {
1882                         if (bootverbose)
1883                                 device_printf(dev,"Link is Down\n");
1884                         if_link_state_change(ifp, LINK_STATE_DOWN);
1885                         adapter->link_active = FALSE;
1886                 }
1887         }
1888
1889         return;
1890 }
1891
1892
1893 /*********************************************************************
1894  *
1895  *  This routine disables all traffic on the adapter by issuing a
1896  *  global reset on the MAC and deallocating TX/RX buffers.
1897  *
1898  **********************************************************************/
1899
1900 static void
1901 ixgbe_stop(void *arg)
1902 {
1903         struct ifnet   *ifp;
1904         struct adapter *adapter = arg;
1905         struct ixgbe_hw *hw = &adapter->hw;
1906         ifp = adapter->ifp;
1907
1908         mtx_assert(&adapter->core_mtx, MA_OWNED);
1909
1910         INIT_DEBUGOUT("ixgbe_stop: begin\n");
1911         ixgbe_disable_intr(adapter);
1912         callout_stop(&adapter->timer);
1913
1914         /* Let the stack know...*/
1915         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1916
1917         ixgbe_reset_hw(hw);
1918         hw->adapter_stopped = FALSE;
1919         ixgbe_stop_adapter(hw);
1920         if (hw->mac.type == ixgbe_mac_82599EB)
1921                 ixgbe_stop_mac_link_on_d3_82599(hw);
1922         /* Turn off the laser - noop with no optics */
1923         ixgbe_disable_tx_laser(hw);
1924
1925         /* Update the stack */
1926         adapter->link_up = FALSE;
1927         ixgbe_update_link_status(adapter);
1928
1929         /* reprogram the RAR[0] in case user changed it. */
1930         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1931
1932         return;
1933 }
1934
1935
1936 /*********************************************************************
1937  *
1938  *  Determine hardware revision.
1939  *
1940  **********************************************************************/
1941 static void
1942 ixgbe_identify_hardware(struct adapter *adapter)
1943 {
1944         device_t        dev = adapter->dev;
1945         struct ixgbe_hw *hw = &adapter->hw;
1946
1947         /* Save off the information about this board */
1948         hw->vendor_id = pci_get_vendor(dev);
1949         hw->device_id = pci_get_device(dev);
1950         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1951         hw->subsystem_vendor_id =
1952             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1953         hw->subsystem_device_id =
1954             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1955
1956         /*
1957         ** Make sure BUSMASTER is set
1958         */
1959         pci_enable_busmaster(dev);
1960
1961         /* We need this here to set the num_segs below */
1962         ixgbe_set_mac_type(hw);
1963
1964         /* Pick up the 82599 and VF settings */
1965         if (hw->mac.type != ixgbe_mac_82598EB) {
1966                 hw->phy.smart_speed = ixgbe_smart_speed;
1967                 adapter->num_segs = IXGBE_82599_SCATTER;
1968         } else
1969                 adapter->num_segs = IXGBE_82598_SCATTER;
1970
1971         return;
1972 }
1973
1974 /*********************************************************************
1975  *
1976  *  Determine optic type
1977  *
1978  **********************************************************************/
1979 static void
1980 ixgbe_setup_optics(struct adapter *adapter)
1981 {
1982         struct ixgbe_hw *hw = &adapter->hw;
1983         int             layer;
1984
1985         layer = ixgbe_get_supported_physical_layer(hw);
1986
1987         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1988                 adapter->optics = IFM_10G_T;
1989                 return;
1990         }
1991
1992         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1993                 adapter->optics = IFM_1000_T;
1994                 return;
1995         }
1996
1997         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1998                 adapter->optics = IFM_1000_SX;
1999                 return;
2000         }
2001
2002         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2003             IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2004                 adapter->optics = IFM_10G_LR;
2005                 return;
2006         }
2007
2008         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2009                 adapter->optics = IFM_10G_SR;
2010                 return;
2011         }
2012
2013         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2014                 adapter->optics = IFM_10G_TWINAX;
2015                 return;
2016         }
2017
2018         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2019             IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2020                 adapter->optics = IFM_10G_CX4;
2021                 return;
2022         }
2023
2024         /* If we get here just set the default */
2025         adapter->optics = IFM_ETHER | IFM_AUTO;
2026         return;
2027 }
2028
2029 /*********************************************************************
2030  *
2031  *  Setup the Legacy or MSI Interrupt handler
2032  *
2033  **********************************************************************/
2034 static int
2035 ixgbe_allocate_legacy(struct adapter *adapter)
2036 {
2037         device_t        dev = adapter->dev;
2038         struct          ix_queue *que = adapter->queues;
2039 #ifndef IXGBE_LEGACY_TX
2040         struct tx_ring          *txr = adapter->tx_rings;
2041 #endif
2042         int             error, rid = 0;
2043
2044         /* MSI RID at 1 */
2045         if (adapter->msix == 1)
2046                 rid = 1;
2047
2048         /* We allocate a single interrupt resource */
2049         adapter->res = bus_alloc_resource_any(dev,
2050             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2051         if (adapter->res == NULL) {
2052                 device_printf(dev, "Unable to allocate bus resource: "
2053                     "interrupt\n");
2054                 return (ENXIO);
2055         }
2056
2057         /*
2058          * Try allocating a fast interrupt and the associated deferred
2059          * processing contexts.
2060          */
2061 #ifndef IXGBE_LEGACY_TX
2062         TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2063 #endif
2064         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2065         que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2066             taskqueue_thread_enqueue, &que->tq);
2067         taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
2068             device_get_nameunit(adapter->dev));
2069
2070         /* Tasklets for Link, SFP and Multispeed Fiber */
2071         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2072         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2073         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2074 #ifdef IXGBE_FDIR
2075         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2076 #endif
2077         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2078             taskqueue_thread_enqueue, &adapter->tq);
2079         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2080             device_get_nameunit(adapter->dev));
2081
2082         if ((error = bus_setup_intr(dev, adapter->res,
2083             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2084             que, &adapter->tag)) != 0) {
2085                 device_printf(dev, "Failed to register fast interrupt "
2086                     "handler: %d\n", error);
2087                 taskqueue_free(que->tq);
2088                 taskqueue_free(adapter->tq);
2089                 que->tq = NULL;
2090                 adapter->tq = NULL;
2091                 return (error);
2092         }
2093         /* For simplicity in the handlers */
2094         adapter->active_queues = IXGBE_EIMS_ENABLE_MASK;
2095
2096         return (0);
2097 }
2098
2099
2100 /*********************************************************************
2101  *
2102  *  Setup MSIX Interrupt resources and handlers 
2103  *
2104  **********************************************************************/
2105 static int
2106 ixgbe_allocate_msix(struct adapter *adapter)
2107 {
2108         device_t        dev = adapter->dev;
2109         struct          ix_queue *que = adapter->queues;
2110         struct          tx_ring *txr = adapter->tx_rings;
2111         int             error, rid, vector = 0;
2112         int             cpu_id = 0;
2113 #ifdef  RSS
2114         cpuset_t        cpu_mask;
2115 #endif
2116
2117 #ifdef  RSS
2118         /*
2119          * If we're doing RSS, the number of queues needs to
2120          * match the number of RSS buckets that are configured.
2121          *
2122          * + If there are more queues than RSS buckets, we'll end
2123          *   up with queues that get no traffic.
2124          *
2125          * + If there are more RSS buckets than queues, we'll end
2126          *   up having multiple RSS buckets map to the same queue,
2127          *   so there'll be some contention.
2128          */
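        /*
         * Example with assumed counts (illustration only): with 8 RSS
         * buckets but only 4 queues, the redirection-table setup in
         * ixgbe_initialise_rss_mapping() maps bucket b to queue
         * (b % 4), so buckets 0 and 4 share queue 0, buckets 1 and 5
         * share queue 1, and so on.
         */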
2129         if (adapter->num_queues != rss_getnumbuckets()) {
2130                 device_printf(dev,
2131                     "%s: number of queues (%d) != number of RSS buckets (%d)"
2132                     "; performance will be impacted.\n",
2133                     __func__,
2134                     adapter->num_queues,
2135                     rss_getnumbuckets());
2136         }
2137 #endif
2138
2139
2140
2141         for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2142                 rid = vector + 1;
2143                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2144                     RF_SHAREABLE | RF_ACTIVE);
2145                 if (que->res == NULL) {
2146                         device_printf(dev,"Unable to allocate"
2147                             " bus resource: que interrupt [%d]\n", vector);
2148                         return (ENXIO);
2149                 }
2150                 /* Set the handler function */
2151                 error = bus_setup_intr(dev, que->res,
2152                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2153                     ixgbe_msix_que, que, &que->tag);
2154                 if (error) {
2155                         que->res = NULL;
2156                         device_printf(dev, "Failed to register QUE handler");
2157                         return (error);
2158                 }
2159 #if __FreeBSD_version >= 800504
2160                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2161 #endif
2162                 que->msix = vector;
2163                 adapter->active_queues |= (u64)(1 << que->msix);
2164 #ifdef  RSS
2165                 /*
2166                  * The queue ID is used as the RSS layer bucket ID.
2167                  * We look up the queue ID -> RSS CPU ID and select
2168                  * that.
2169                  */
2170                 cpu_id = rss_getcpu(i % rss_getnumbuckets());
2171 #else
2172                 /*
2173                  * Bind the msix vector, and thus the
2174                  * rings to the corresponding cpu.
2175                  *
2176                  * This just happens to match the default RSS round-robin
2177                  * bucket -> queue -> CPU allocation.
2178                  */
2179                 if (adapter->num_queues > 1)
2180                         cpu_id = i;
2181 #endif
2182                 if (adapter->num_queues > 1)
2183                         bus_bind_intr(dev, que->res, cpu_id);
2184
2185 #ifdef  RSS
2186                 device_printf(dev,
2187                     "Bound RSS bucket %d to CPU %d\n",
2188                     i, cpu_id);
2189 #else
2190 #if 0 // This is too noisy
2191                 device_printf(dev,
2192                     "Bound queue %d to cpu %d\n",
2193                     i, cpu_id);
2194 #endif
2195 #endif
2196
2197
2198 #ifndef IXGBE_LEGACY_TX
2199                 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
2200 #endif
2201                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2202                 que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
2203                     taskqueue_thread_enqueue, &que->tq);
2204 #ifdef  RSS
2205                 CPU_SETOF(cpu_id, &cpu_mask);
2206                 taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
2207                     &cpu_mask,
2208                     "%s (bucket %d)",
2209                     device_get_nameunit(adapter->dev),
2210                     cpu_id);
2211 #else
2212                 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
2213                     device_get_nameunit(adapter->dev));
2214 #endif
2215         }
2216
2217         /* and Link */
2218         rid = vector + 1;
2219         adapter->res = bus_alloc_resource_any(dev,
2220             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2221         if (!adapter->res) {
2222                 device_printf(dev,"Unable to allocate"
2223             " bus resource: Link interrupt [%d]\n", rid);
2224                 return (ENXIO);
2225         }
2226         /* Set the link handler function */
2227         error = bus_setup_intr(dev, adapter->res,
2228             INTR_TYPE_NET | INTR_MPSAFE, NULL,
2229             ixgbe_msix_link, adapter, &adapter->tag);
2230         if (error) {
2231                 adapter->res = NULL;
2232                 device_printf(dev, "Failed to register LINK handler");
2233                 return (error);
2234         }
2235 #if __FreeBSD_version >= 800504
2236         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2237 #endif
2238         adapter->vector = vector;
2239         /* Tasklets for Link, SFP and Multispeed Fiber */
2240         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2241         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2242         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2243 #ifdef IXGBE_FDIR
2244         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2245 #endif
2246         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2247             taskqueue_thread_enqueue, &adapter->tq);
2248         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2249             device_get_nameunit(adapter->dev));
2250
2251         return (0);
2252 }
2253
2254 /*
2255  * Setup Either MSI/X or MSI
2256  */
2257 static int
2258 ixgbe_setup_msix(struct adapter *adapter)
2259 {
2260         device_t dev = adapter->dev;
2261         int rid, want, queues, msgs;
2262
2263         /* Override by tuneable */
2264         if (ixgbe_enable_msix == 0)
2265                 goto msi;
2266
2267         /* First try MSI/X */
2268         msgs = pci_msix_count(dev); 
2269         if (msgs == 0)
2270                 goto msi;
2271         rid = PCIR_BAR(MSIX_82598_BAR);
2272         adapter->msix_mem = bus_alloc_resource_any(dev,
2273             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2274         if (adapter->msix_mem == NULL) {
2275                 rid += 4;       /* 82599 maps in higher BAR */
2276                 adapter->msix_mem = bus_alloc_resource_any(dev,
2277                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
2278         }
2279         if (adapter->msix_mem == NULL) {
2280                 /* May not be enabled */
2281                 device_printf(adapter->dev,
2282                     "Unable to map MSIX table \n");
2283                 goto msi;
2284         }
2285
2286         /* Figure out a reasonable auto config value */
2287         queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2288
2289 #ifdef  RSS
2290         /* If we're doing RSS, clamp at the number of RSS buckets */
2291         if (queues > rss_getnumbuckets())
2292                 queues = rss_getnumbuckets();
2293 #endif
2294
2295         if (ixgbe_num_queues != 0)
2296                 queues = ixgbe_num_queues;
2297
2298         /* reflect correct sysctl value */
2299         ixgbe_num_queues = queues;
2300
2301         /*
2302         ** Want one vector (RX/TX pair) per queue
2303         ** plus an additional for Link.
2304         */
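        /*
        ** Example with assumed numbers (illustration only): on an
        ** 8-core machine whose device reports 10 MSI-X messages, and
        ** with the ixgbe_num_queues tunable left at 0, queues = 8
        ** above, so want below becomes 9 and msgs is trimmed to match
        ** before the pci_alloc_msix() call.
        */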
2305         want = queues + 1;
2306         if (msgs >= want)
2307                 msgs = want;
2308         else {
2309                 device_printf(adapter->dev,
2310                     "MSIX Configuration Problem, "
2311                     "%d vectors but %d queues wanted!\n",
2312                     msgs, want);
2313                 goto msi;
2314         }
2315         if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
2316                 device_printf(adapter->dev,
2317                     "Using MSIX interrupts with %d vectors\n", msgs);
2318                 adapter->num_queues = queues;
2319                 return (msgs);
2320         }
2321         /*
2322         ** If MSIX alloc failed or provided us with
2323         ** less than needed, free and fall through to MSI
2324         */
2325         pci_release_msi(dev);
2326
2327 msi:
2328         if (adapter->msix_mem != NULL) {
2329                 bus_release_resource(dev, SYS_RES_MEMORY,
2330                     rid, adapter->msix_mem);
2331                 adapter->msix_mem = NULL;
2332         }
2333         msgs = 1;
2334         if (pci_alloc_msi(dev, &msgs) == 0) {
2335                 device_printf(adapter->dev,"Using an MSI interrupt\n");
2336                 return (msgs);
2337         }
2338         device_printf(adapter->dev,"Using a Legacy interrupt\n");
2339         return (0);
2340 }
2341
2342
2343 static int
2344 ixgbe_allocate_pci_resources(struct adapter *adapter)
2345 {
2346         int             rid;
2347         device_t        dev = adapter->dev;
2348
2349         rid = PCIR_BAR(0);
2350         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2351             &rid, RF_ACTIVE);
2352
2353         if (!(adapter->pci_mem)) {
2354                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2355                 return (ENXIO);
2356         }
2357
2358         adapter->osdep.mem_bus_space_tag =
2359                 rman_get_bustag(adapter->pci_mem);
2360         adapter->osdep.mem_bus_space_handle =
2361                 rman_get_bushandle(adapter->pci_mem);
2362         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2363
2364         /* Legacy defaults */
2365         adapter->num_queues = 1;
2366         adapter->hw.back = &adapter->osdep;
2367
2368         /*
2369         ** Now set up MSI or MSI/X, which should
2370         ** return the number of supported
2371         ** vectors (will be 1 for MSI).
2372         */
2373         adapter->msix = ixgbe_setup_msix(adapter);
2374         return (0);
2375 }
2376
2377 static void
2378 ixgbe_free_pci_resources(struct adapter * adapter)
2379 {
2380         struct          ix_queue *que = adapter->queues;
2381         device_t        dev = adapter->dev;
2382         int             rid, memrid;
2383
2384         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2385                 memrid = PCIR_BAR(MSIX_82598_BAR);
2386         else
2387                 memrid = PCIR_BAR(MSIX_82599_BAR);
2388
2389         /*
2390         ** There is a slight possibility of a failure mode
2391         ** in attach that will result in entering this function
2392         ** before interrupt resources have been initialized, and
2393         ** in that case we do not want to execute the loops below.
2394         ** We can detect this reliably by the state of the adapter's
2395         ** res pointer.
2396         */
2397         if (adapter->res == NULL)
2398                 goto mem;
2399
2400         /*
2401         **  Release all msix queue resources:
2402         */
2403         for (int i = 0; i < adapter->num_queues; i++, que++) {
2404                 rid = que->msix + 1;
2405                 if (que->tag != NULL) {
2406                         bus_teardown_intr(dev, que->res, que->tag);
2407                         que->tag = NULL;
2408                 }
2409                 if (que->res != NULL)
2410                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2411         }
2412
2413
2414         /* Clean the Legacy or Link interrupt last */
2415         if (adapter->vector) /* we are doing MSIX */
2416                 rid = adapter->vector + 1;
2417         else
2418                 (adapter->msix != 0) ? (rid = 1):(rid = 0);
2419
2420         if (adapter->tag != NULL) {
2421                 bus_teardown_intr(dev, adapter->res, adapter->tag);
2422                 adapter->tag = NULL;
2423         }
2424         if (adapter->res != NULL)
2425                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2426
2427 mem:
2428         if (adapter->msix)
2429                 pci_release_msi(dev);
2430
2431         if (adapter->msix_mem != NULL)
2432                 bus_release_resource(dev, SYS_RES_MEMORY,
2433                     memrid, adapter->msix_mem);
2434
2435         if (adapter->pci_mem != NULL)
2436                 bus_release_resource(dev, SYS_RES_MEMORY,
2437                     PCIR_BAR(0), adapter->pci_mem);
2438
2439         return;
2440 }
2441
2442 /*********************************************************************
2443  *
2444  *  Setup networking device structure and register an interface.
2445  *
2446  **********************************************************************/
2447 static int
2448 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2449 {
2450         struct ifnet   *ifp;
2451
2452         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2453
2454         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2455         if (ifp == NULL) {
2456                 device_printf(dev, "can not allocate ifnet structure\n");
2457                 return (-1);
2458         }
2459         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2460         ifp->if_baudrate = IF_Gbps(10);
2461         ifp->if_init = ixgbe_init;
2462         ifp->if_softc = adapter;
2463         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2464         ifp->if_ioctl = ixgbe_ioctl;
2465 #if __FreeBSD_version >= 1100036
2466         if_setgetcounterfn(ifp, ixgbe_get_counter);
2467 #endif
2468 #ifndef IXGBE_LEGACY_TX
2469         ifp->if_transmit = ixgbe_mq_start;
2470         ifp->if_qflush = ixgbe_qflush;
2471 #else
2472         ifp->if_start = ixgbe_start;
2473         IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
2474         ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
2475         IFQ_SET_READY(&ifp->if_snd);
2476 #endif
2477
2478         ether_ifattach(ifp, adapter->hw.mac.addr);
2479
2480         adapter->max_frame_size =
2481             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2482
2483         /*
2484          * Tell the upper layer(s) we support long frames.
2485          */
2486         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
2487
2488         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2489         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2490         ifp->if_capabilities |= IFCAP_LRO;
2491         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2492                              |  IFCAP_VLAN_HWTSO
2493                              |  IFCAP_VLAN_MTU
2494                              |  IFCAP_HWSTATS;
2495         ifp->if_capenable = ifp->if_capabilities;
2496
2497         /*
2498         ** Don't turn this on by default: if vlans are
2499         ** created on another pseudo device (e.g. lagg),
2500         ** vlan events are not passed through, breaking
2501         ** operation, but with HW FILTER off it works. If
2502         ** you use vlans directly on the ixgbe driver you can
2503         ** enable this and get full hardware tag filtering.
2504         */
2505         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2506
2507         /*
2508          * Specify the media types supported by this adapter and register
2509          * callbacks to update media and link information
2510          */
2511         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2512                     ixgbe_media_status);
2513
2514         ixgbe_add_media_types(adapter);
2515
2516         /* Autoselect media by default */
2517         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2518
2519         return (0);
2520 }
2521
2522 static void
2523 ixgbe_add_media_types(struct adapter *adapter)
2524 {
2525         struct ixgbe_hw *hw = &adapter->hw;
2526         device_t dev = adapter->dev;
2527         int layer;
2528
2529         layer = ixgbe_get_supported_physical_layer(hw);
2530
2531         /* Media types with matching FreeBSD media defines */
2532         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
2533                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL);
2534         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
2535                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2536         if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
2537                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2538         
2539         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2540             layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2541                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
2542
2543         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2544                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
2545         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
2546                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
2547         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2548                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
2549         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2550                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2551 #if 0
2552         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
2553                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
2554 #endif
2555
2556         /*
2557         ** Other (no matching FreeBSD media type):
2558         ** To work around this, we'll assign these completely
2559         ** inappropriate media types.
2560         */
2561         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
2562                 device_printf(dev, "Media supported: 10GbaseKR\n");
2563                 device_printf(dev, "10GbaseKR mapped to 10baseT\n");
2564                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2565         }
2566         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
2567                 device_printf(dev, "Media supported: 10GbaseKX4\n");
2568                 device_printf(dev, "10GbaseKX4 mapped to 10base2\n");
2569                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_2, 0, NULL);
2570         }
2571         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
2572                 device_printf(dev, "Media supported: 1000baseKX\n");
2573                 device_printf(dev, "1000baseKX mapped to 10base5\n");
2574                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_5, 0, NULL);
2575         }
2576         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
2577                 /* Someday, someone will care about you... */
2578                 device_printf(dev, "Media supported: 1000baseBX\n");
2579         }
2580         
2581         /* Very old */
2582         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2583                 ifmedia_add(&adapter->media,
2584                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2585                 ifmedia_add(&adapter->media,
2586                     IFM_ETHER | IFM_1000_T, 0, NULL);
2587         }
2588
2589         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2590 }
2591
2592 static void
2593 ixgbe_config_link(struct adapter *adapter)
2594 {
2595         struct ixgbe_hw *hw = &adapter->hw;
2596         u32     autoneg, err = 0;
2597         bool    sfp, negotiate;
2598
2599         sfp = ixgbe_is_sfp(hw);
2600
2601         if (sfp) { 
2602                 if (hw->phy.multispeed_fiber) {
2603                         hw->mac.ops.setup_sfp(hw);
2604                         ixgbe_enable_tx_laser(hw);
2605                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2606                 } else
2607                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2608         } else {
2609                 if (hw->mac.ops.check_link)
2610                         err = ixgbe_check_link(hw, &adapter->link_speed,
2611                             &adapter->link_up, FALSE);
2612                 if (err)
2613                         goto out;
2614                 autoneg = hw->phy.autoneg_advertised;
2615                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2616                         err  = hw->mac.ops.get_link_capabilities(hw,
2617                             &autoneg, &negotiate);
2618                 if (err)
2619                         goto out;
2620                 if (hw->mac.ops.setup_link)
2621                         err = hw->mac.ops.setup_link(hw,
2622                             autoneg, adapter->link_up);
2623         }
2624 out:
2625         return;
2626 }
2627
2628
2629 /*********************************************************************
2630  *
2631  *  Enable transmit units.
2632  *
2633  **********************************************************************/
2634 static void
2635 ixgbe_initialize_transmit_units(struct adapter *adapter)
2636 {
2637         struct tx_ring  *txr = adapter->tx_rings;
2638         struct ixgbe_hw *hw = &adapter->hw;
2639
2640         /* Setup the Base and Length of the Tx Descriptor Ring */
2641
2642         for (int i = 0; i < adapter->num_queues; i++, txr++) {
2643                 u64     tdba = txr->txdma.dma_paddr;
2644                 u32     txctrl = 0;
2645
2646                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2647                        (tdba & 0x00000000ffffffffULL));
2648                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2649                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2650                     adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
2651
2652                 /* Setup the HW Tx Head and Tail descriptor pointers */
2653                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2654                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2655
2656                 /* Cache the tail address */
2657                 txr->tail = IXGBE_TDT(txr->me);
2658
2659                 /* Set the processing limit */
2660                 txr->process_limit = ixgbe_tx_process_limit;
2661
2662                 /* Disable Head Writeback */
2663                 switch (hw->mac.type) {
2664                 case ixgbe_mac_82598EB:
2665                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2666                         break;
2667                 case ixgbe_mac_82599EB:
2668                 case ixgbe_mac_X540:
2669                 default:
2670                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2671                         break;
2672                 }
2673                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2674                 switch (hw->mac.type) {
2675                 case ixgbe_mac_82598EB:
2676                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2677                         break;
2678                 case ixgbe_mac_82599EB:
2679                 case ixgbe_mac_X540:
2680                 default:
2681                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2682                         break;
2683                 }
2684
2685         }
2686
2687         if (hw->mac.type != ixgbe_mac_82598EB) {
2688                 u32 dmatxctl, rttdcs;
2689                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2690                 dmatxctl |= IXGBE_DMATXCTL_TE;
2691                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2692                 /* Disable arbiter to set MTQC */
2693                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2694                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2695                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2696                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2697                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2698                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2699         }
2700
2701         return;
2702 }
2703
2704 static void
2705 ixgbe_initialise_rss_mapping(struct adapter *adapter)
2706 {
2707         struct ixgbe_hw *hw = &adapter->hw;
2708         uint32_t reta;
2709         int i, j, queue_id;
2710         uint32_t rss_key[10];
2711         uint32_t mrqc;
2712 #ifdef  RSS
2713         uint32_t rss_hash_config;
2714 #endif
2715
2716         /* Setup RSS */
2717         reta = 0;
2718
2719 #ifdef  RSS
2720         /* Fetch the configured RSS key */
2721         rss_getkey((uint8_t *) &rss_key);
2722 #else
2723         /* set up random bits */
2724         arc4rand(&rss_key, sizeof(rss_key), 0);
2725 #endif
2726
2727         /* Set up the redirection table */
2728         for (i = 0, j = 0; i < 128; i++, j++) {
2729                 if (j == adapter->num_queues) j = 0;
2730 #ifdef  RSS
2731                 /*
2732                  * Fetch the RSS bucket id for the given indirection entry.
2733                  * Cap it at the number of configured buckets (which is
2734                  * num_queues.)
2735                  */
2736                 queue_id = rss_get_indirection_to_bucket(i);
2737                 queue_id = queue_id % adapter->num_queues;
2738 #else
2739                 queue_id = (j * 0x11);
2740 #endif
2741                 /*
2742                  * The low 8 bits are for hash value (n+0);
2743                  * The next 8 bits are for hash value (n+1), etc.
2744                  */
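                /*
                 * Worked example (illustration only): for the first four
                 * indirection entries with queue ids q0..q3, the
                 * shift-and-or below leaves reta == (q3 << 24) |
                 * (q2 << 16) | (q1 << 8) | q0 after the fourth pass,
                 * at which point (i & 3) == 3 and the word is written
                 * to RETA(0).
                 */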
2745                 reta = reta >> 8;
2746                 reta = reta | ( ((uint32_t) queue_id) << 24);
2747                 if ((i & 3) == 3) {
2748                         IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2749                         reta = 0;
2750                 }
2751         }
2752
2753         /* Now fill our hash function seeds */
2754         for (int i = 0; i < 10; i++)
2755                 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2756
2757         /* Perform hash on these packet types */
2758 #ifdef  RSS
2759         mrqc = IXGBE_MRQC_RSSEN;
2760         rss_hash_config = rss_gethashconfig();
2761         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
2762                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
2763         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
2764                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
2765         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
2766                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
2767         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
2768                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2769         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
2770                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
2771         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
2772                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
2773         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
2774                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2775         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4_EX)
2776                 device_printf(adapter->dev,
2777                     "%s: RSS_HASHTYPE_RSS_UDP_IPV4_EX defined, "
2778                     "but not supported\n", __func__);
2779         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
2780                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2781         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
2782                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2783 #else
2784         /*
2785          * Disable UDP - IP fragments aren't currently being handled
2786          * and so we end up with a mix of 2-tuple and 4-tuple
2787          * traffic.
2788          */
2789         mrqc = IXGBE_MRQC_RSSEN
2790              | IXGBE_MRQC_RSS_FIELD_IPV4
2791              | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2792 #if 0
2793              | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2794 #endif
2795              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2796              | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2797              | IXGBE_MRQC_RSS_FIELD_IPV6
2798              | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2799 #if 0
2800              | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2801              | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
2802 #endif
2803         ;
2804 #endif /* RSS */
2805         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2806 }
2807
2808
2809 /*********************************************************************
2810  *
2811  *  Setup receive registers and features.
2812  *
2813  **********************************************************************/
2814 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2815
2816 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
2817         
2818 static void
2819 ixgbe_initialize_receive_units(struct adapter *adapter)
2820 {
2821         struct  rx_ring *rxr = adapter->rx_rings;
2822         struct ixgbe_hw *hw = &adapter->hw;
2823         struct ifnet   *ifp = adapter->ifp;
2824         u32             bufsz, fctrl, srrctl, rxcsum;
2825         u32             hlreg;
2826
2827
2828         /*
2829          * Make sure receives are disabled while
2830          * setting up the descriptor ring
2831          */
2832         ixgbe_disable_rx(hw);
2833
2834         /* Enable broadcasts */
2835         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2836         fctrl |= IXGBE_FCTRL_BAM;
2837         fctrl |= IXGBE_FCTRL_DPF;
2838         fctrl |= IXGBE_FCTRL_PMCF;
2839         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2840
2841         /* Set for Jumbo Frames? */
2842         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2843         if (ifp->if_mtu > ETHERMTU)
2844                 hlreg |= IXGBE_HLREG0_JUMBOEN;
2845         else
2846                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2847 #ifdef DEV_NETMAP
2848         /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
2849         if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
2850                 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
2851         else
2852                 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
2853 #endif /* DEV_NETMAP */
2854         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2855
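        /*
         * Round the receive mbuf size up to the granularity implied by
         * IXGBE_SRRCTL_BSIZEPKT_SHIFT and convert it into those units;
         * this value is programmed into SRRCTL.BSIZEPKT for each queue
         * in the loop below.
         */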
2856         bufsz = (adapter->rx_mbuf_sz +
2857             BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2858
2859         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
2860                 u64 rdba = rxr->rxdma.dma_paddr;
2861
2862                 /* Setup the Base and Length of the Rx Descriptor Ring */
2863                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2864                                (rdba & 0x00000000ffffffffULL));
2865                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2866                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2867                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2868
2869                 /* Set up the SRRCTL register */
2870                 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
2871                 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2872                 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2873                 srrctl |= bufsz;
2874                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2875
2876                 /*
2877                  * Set DROP_EN iff we have no flow control and >1 queue.
2878                  * Note that srrctl was cleared shortly before during reset,
2879                  * so we do not need to clear the bit, but do it just in case
2880                  * this code is moved elsewhere.
2881                  */
2882                 if (adapter->num_queues > 1 &&
2883                     adapter->hw.fc.requested_mode == ixgbe_fc_none) {
2884                         srrctl |= IXGBE_SRRCTL_DROP_EN;
2885                 } else {
2886                         srrctl &= ~IXGBE_SRRCTL_DROP_EN;
2887                 }
2888
2889                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2890
2891                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2892                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2893                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2894
2895                 /* Set the processing limit */
2896                 rxr->process_limit = ixgbe_rx_process_limit;
2897
2898                 /* Set the driver rx tail address */
2899                 rxr->tail = IXGBE_RDT(rxr->me);
2900         }
2901
2902         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
2903                 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2904                               IXGBE_PSRTYPE_UDPHDR |
2905                               IXGBE_PSRTYPE_IPV4HDR |
2906                               IXGBE_PSRTYPE_IPV6HDR;
2907                 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2908         }
2909
2910         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2911
2912         ixgbe_initialise_rss_mapping(adapter);
2913
2914         if (adapter->num_queues > 1) {
2915                 /* RSS and RX IPP Checksum are mutually exclusive */
2916                 rxcsum |= IXGBE_RXCSUM_PCSD;
2917         }
2918
2919         if (ifp->if_capenable & IFCAP_RXCSUM)
2920                 rxcsum |= IXGBE_RXCSUM_PCSD;
2921
2922         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2923                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2924
2925         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2926
2927         return;
2928 }
2929
2930
2931 /*
2932 ** This routine is run via a vlan config EVENT;
2933 ** it enables us to use the HW Filter table since
2934 ** we can get the vlan id. This just creates the
2935 ** entry in the soft version of the VFTA; init will
2936 ** repopulate the real table.
2937 */
2938 static void
2939 ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2940 {
2941         struct adapter  *adapter = ifp->if_softc;
2942         u16             index, bit;
2943
2944         if (ifp->if_softc !=  arg)   /* Not our event */
2945                 return;
2946
2947         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2948                 return;
2949
2950         IXGBE_CORE_LOCK(adapter);
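        /*
         * The shadow VFTA is an array of 32-bit words: bits [11:5] of
         * the VLAN id select the word and bits [4:0] select the bit
         * within it, mirroring the hardware VLAN Filter Table layout.
         */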
2951         index = (vtag >> 5) & 0x7F;
2952         bit = vtag & 0x1F;
2953         adapter->shadow_vfta[index] |= (1 << bit);
2954         ++adapter->num_vlans;
2955         ixgbe_setup_vlan_hw_support(adapter);
2956         IXGBE_CORE_UNLOCK(adapter);
2957 }
2958
2959 /*
2960 ** This routine is run via a vlan
2961 ** unconfig EVENT; it removes our entry
2962 ** from the soft VFTA.
2963 */
2964 static void
2965 ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2966 {
2967         struct adapter  *adapter = ifp->if_softc;
2968         u16             index, bit;
2969
2970         if (ifp->if_softc !=  arg)
2971                 return;
2972
2973         if ((vtag == 0) || (vtag > 4095))       /* Invalid */
2974                 return;
2975
2976         IXGBE_CORE_LOCK(adapter);
2977         index = (vtag >> 5) & 0x7F;
2978         bit = vtag & 0x1F;
2979         adapter->shadow_vfta[index] &= ~(1 << bit);
2980         --adapter->num_vlans;
2981         /* Re-init to load the changes */
2982         ixgbe_setup_vlan_hw_support(adapter);
2983         IXGBE_CORE_UNLOCK(adapter);
2984 }
2985
2986 static void
2987 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2988 {
2989         struct ifnet    *ifp = adapter->ifp;
2990         struct ixgbe_hw *hw = &adapter->hw;
2991         struct rx_ring  *rxr;
2992         u32             ctrl;
2993
2994
2995         /*
2996         ** We get here through init_locked, meaning
2997         ** a soft reset; this has already cleared
2998         ** the VFTA and other state, so if no vlans
2999         ** have been registered, do nothing.
3000         */
3001         if (adapter->num_vlans == 0)
3002                 return;
3003
3004         /* Setup the queues for vlans */
3005         for (int i = 0; i < adapter->num_queues; i++) {
3006                 rxr = &adapter->rx_rings[i];
3007                 /* On 82599 the VLAN enable is per/queue in RXDCTL */
3008                 if (hw->mac.type != ixgbe_mac_82598EB) {
3009                         ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
3010                         ctrl |= IXGBE_RXDCTL_VME;
3011                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
3012                 }
3013                 rxr->vtag_strip = TRUE;
3014         }
3015
3016         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3017                 return;
3018         /*
3019         ** A soft reset zeroes out the VFTA, so
3020         ** we need to repopulate it now.
3021         */
3022         for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
3023                 if (adapter->shadow_vfta[i] != 0)
3024                         IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
3025                             adapter->shadow_vfta[i]);
3026
3027         ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3028         /* Enable the Filter Table if enabled */
3029         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3030                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3031                 ctrl |= IXGBE_VLNCTRL_VFE;
3032         }
3033         if (hw->mac.type == ixgbe_mac_82598EB)
3034                 ctrl |= IXGBE_VLNCTRL_VME;
3035         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
3036 }
3037
3038 static void
3039 ixgbe_enable_intr(struct adapter *adapter)
3040 {
3041         struct ixgbe_hw *hw = &adapter->hw;
3042         struct ix_queue *que = adapter->queues;
3043         u32             mask, fwsm;
3044
3045         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3046         /* Enable Fan Failure detection */
3047         if (hw->device_id == IXGBE_DEV_ID_82598AT)
3048                 mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3049
3050         switch (adapter->hw.mac.type) {
3051                 case ixgbe_mac_82599EB:
3052                         mask |= IXGBE_EIMS_ECC;
3053                         /* Temperature sensor on some adapters */
3054                         mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3055                         /* SFP+ (RX_LOS_N & MOD_ABS_N) */
3056                         mask |= IXGBE_EIMS_GPI_SDP1_BY_MAC(hw);
3057                         mask |= IXGBE_EIMS_GPI_SDP2_BY_MAC(hw);
3058 #ifdef IXGBE_FDIR
3059                         mask |= IXGBE_EIMS_FLOW_DIR;
3060 #endif
3061                         break;
3062                 case ixgbe_mac_X540:
3063                 case ixgbe_mac_X550:
3064                 case ixgbe_mac_X550EM_a:
3065                 case ixgbe_mac_X550EM_x:
3066                         /* Detect if Thermal Sensor is enabled */
3067                         fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3068                         if (fwsm & IXGBE_FWSM_TS_ENABLED)
3069                                 mask |= IXGBE_EIMS_TS;
3070                         /* XXX: Which SFP mode line does this look at? */
3071                         if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
3072                                 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
3073                         mask |= IXGBE_EIMS_ECC;
3074 #ifdef IXGBE_FDIR
3075                         mask |= IXGBE_EIMS_FLOW_DIR;
3076 #endif
3077                 /* falls through */
3078                 default:
3079                         break;
3080         }
3081
3082         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3083
3084         /* With RSS we use auto clear */
3085         if (adapter->msix_mem) {
3086                 mask = IXGBE_EIMS_ENABLE_MASK;
3087                 /* Don't autoclear Link */
3088                 mask &= ~IXGBE_EIMS_OTHER;
3089                 mask &= ~IXGBE_EIMS_LSC;
3090                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3091         }
3092
3093         /*
3094         ** Now enable all queues; this is done separately to
3095         ** allow for handling the extended (beyond 32) MSIX
3096         ** vectors that can be used by 82599
3097         */
3098         for (int i = 0; i < adapter->num_queues; i++, que++)
3099                 ixgbe_enable_queue(adapter, que->msix);
3100
3101         IXGBE_WRITE_FLUSH(hw);
3102
3103         return;
3104 }
3105
3106 static void
3107 ixgbe_disable_intr(struct adapter *adapter)
3108 {
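        /*
         * 82598 has a single EIMC register, so clear everything there;
         * newer MACs appear to keep the per-queue bits in the EIMC_EX
         * pair, so mask the non-queue causes in EIMC and clear both
         * EIMC_EX registers.
         */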
3109         if (adapter->msix_mem)
3110                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
3111         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3112                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3113         } else {
3114                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
3115                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
3116                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
3117         }
3118         IXGBE_WRITE_FLUSH(&adapter->hw);
3119         return;
3120 }
3121
3122 /*
3123 ** Get the width and transaction speed of
3124 ** the slot this adapter is plugged into.
3125 */
3126 static void
3127 ixgbe_get_slot_info(struct ixgbe_hw *hw)
3128 {
3129         device_t                dev = ((struct ixgbe_osdep *)hw->back)->dev;
3130         struct ixgbe_mac_info   *mac = &hw->mac;
3131         u16                     link;
3132         u32                     offset;
3133
3134         /* For most devices simply call the shared code routine */
3135         if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
3136                 ixgbe_get_bus_info(hw);
3137                 /* These devices don't use PCI-E */
3138                 if (hw->mac.type == ixgbe_mac_X550EM_x
3139                     || hw->mac.type == ixgbe_mac_X550EM_a)
3140                         return;
3141                 goto display;
3142         }
3143
3144         /*
3145         ** For the Quad port adapter we need to walk back
3146         ** up the PCI tree to find the speed of the expansion
3147         ** slot into which this adapter is plugged. A bit more work.
3148         */
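        /*
         * Each pair of device_get_parent() calls steps from a device up
         * through its pciN bus to the pcib bridge above it; two such
         * hops land on the bridge that provides the expansion slot.
         */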
3149         dev = device_get_parent(device_get_parent(dev));
3150 #ifdef IXGBE_DEBUG
3151         device_printf(dev, "parent pcib = %x,%x,%x\n",
3152             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3153 #endif
3154         dev = device_get_parent(device_get_parent(dev));
3155 #ifdef IXGBE_DEBUG
3156         device_printf(dev, "slot pcib = %x,%x,%x\n",
3157             pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
3158 #endif
3159         /* Now get the PCI Express Capabilities offset */
3160         pci_find_cap(dev, PCIY_EXPRESS, &offset);
3161         /* ...and read the Link Status Register */
3162         link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3163         switch (link & IXGBE_PCI_LINK_WIDTH) {
3164         case IXGBE_PCI_LINK_WIDTH_1:
3165                 hw->bus.width = ixgbe_bus_width_pcie_x1;
3166                 break;
3167         case IXGBE_PCI_LINK_WIDTH_2:
3168                 hw->bus.width = ixgbe_bus_width_pcie_x2;
3169                 break;
3170         case IXGBE_PCI_LINK_WIDTH_4:
3171                 hw->bus.width = ixgbe_bus_width_pcie_x4;
3172                 break;
3173         case IXGBE_PCI_LINK_WIDTH_8:
3174                 hw->bus.width = ixgbe_bus_width_pcie_x8;
3175                 break;
3176         default:
3177                 hw->bus.width = ixgbe_bus_width_unknown;
3178                 break;
3179         }
3180
3181         switch (link & IXGBE_PCI_LINK_SPEED) {
3182         case IXGBE_PCI_LINK_SPEED_2500:
3183                 hw->bus.speed = ixgbe_bus_speed_2500;
3184                 break;
3185         case IXGBE_PCI_LINK_SPEED_5000:
3186                 hw->bus.speed = ixgbe_bus_speed_5000;
3187                 break;
3188         case IXGBE_PCI_LINK_SPEED_8000:
3189                 hw->bus.speed = ixgbe_bus_speed_8000;
3190                 break;
3191         default:
3192                 hw->bus.speed = ixgbe_bus_speed_unknown;
3193                 break;
3194         }
3195
3196         mac->ops.set_lan_id(hw);
3197
3198 display:
3199         device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3200             ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
3201             (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
3202             (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3203             (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
3204             (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
3205             (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
3206             ("Unknown"));
3207
3208         if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3209             ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
3210             (hw->bus.speed == ixgbe_bus_speed_2500))) {
3211                 device_printf(dev, "PCI-Express bandwidth available"
3212                     " for this card\n     is not sufficient for"
3213                     " optimal performance.\n");
3214                 device_printf(dev, "For optimal performance a x8 "
3215                     "PCIE, or x4 PCIE Gen2 slot is required.\n");
3216         }
3217         if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
3218             ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
3219             (hw->bus.speed < ixgbe_bus_speed_8000))) {
3220                 device_printf(dev, "PCI-Express bandwidth available"
3221                     " for this card\n     is not sufficient for"
3222                     " optimal performance.\n");
3223                 device_printf(dev, "For optimal performance a x8 "
3224                     "PCIE Gen3 slot is required.\n");
3225         }
3226
3227         return;
3228 }
3229
3230
3231 /*
3232 ** Setup the correct IVAR register for a particular MSIX interrupt
3233 **   (yes this is all very magic and confusing :)
3234 **  - entry is the register array entry
3235 **  - vector is the MSIX vector for this queue
3236 **  - type is RX/TX/MISC
3237 */
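/*
** Worked example for the 82599-class layout handled below: RX queue
** entry 5 (type 0) lands in IVAR(5 >> 1) = IVAR(2) at bit offset
** (16 * (5 & 1)) + (8 * 0) = 16, i.e. the third byte of that register.
*/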
3238 static void
3239 ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3240 {
3241         struct ixgbe_hw *hw = &adapter->hw;
3242         u32 ivar, index;
3243
3244         vector |= IXGBE_IVAR_ALLOC_VAL;
3245
3246         switch (hw->mac.type) {
3247
3248         case ixgbe_mac_82598EB:
3249                 if (type == -1)
3250                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3251                 else
3252                         entry += (type * 64);
3253                 index = (entry >> 2) & 0x1F;
3254                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3255                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
3256                 ivar |= (vector << (8 * (entry & 0x3)));
3257                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3258                 break;
3259
3260         case ixgbe_mac_82599EB:
3261         case ixgbe_mac_X540:
3262         case ixgbe_mac_X550:
3263         case ixgbe_mac_X550EM_a:
3264         case ixgbe_mac_X550EM_x:
3265                 if (type == -1) { /* MISC IVAR */
3266                         index = (entry & 1) * 8;
3267                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3268                         ivar &= ~(0xFF << index);
3269                         ivar |= (vector << index);
3270                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3271                 } else {        /* RX/TX IVARS */
3272                         index = (16 * (entry & 1)) + (8 * type);
3273                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3274                         ivar &= ~(0xFF << index);
3275                         ivar |= (vector << index);
3276                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3277                 }
3278                 break;
3279         default:
3280                 break;
3281         }
3282 }
3283
3284 static void
3285 ixgbe_configure_ivars(struct adapter *adapter)
3286 {
3287         struct  ix_queue *que = adapter->queues;
3288         u32 newitr;
3289
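        /*
         * EITR keeps its interval in bits [11:3], so 4000000 / rate
         * (which equals (500000 / rate) << 3) is already positioned
         * correctly, and masking with 0x0FF8 keeps only that field.
         * This mirrors the read-back math in the per-queue
         * interrupt_rate sysctl handler.
         */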
3290         if (ixgbe_max_interrupt_rate > 0)
3291                 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3292         else
3293                 newitr = 0;
3294
3295         for (int i = 0; i < adapter->num_queues; i++, que++) {
3296                 /* First the RX queue entry */
3297                 ixgbe_set_ivar(adapter, i, que->msix, 0);
3298                 /* ... and the TX */
3299                 ixgbe_set_ivar(adapter, i, que->msix, 1);
3300                 /* Set an Initial EITR value */
3301                 IXGBE_WRITE_REG(&adapter->hw,
3302                     IXGBE_EITR(que->msix), newitr);
3303         }
3304
3305         /* For the Link interrupt */
3306         ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
3307 }
3308
3309 /*
3310 ** ixgbe_sfp_probe - called in the local timer to
3311 ** determine if a port had optics inserted.
3312 */  
3313 static bool ixgbe_sfp_probe(struct adapter *adapter)
3314 {
3315         struct ixgbe_hw *hw = &adapter->hw;
3316         device_t        dev = adapter->dev;
3317         bool            result = FALSE;
3318
3319         if ((hw->phy.type == ixgbe_phy_nl) &&
3320             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3321                 s32 ret = hw->phy.ops.identify_sfp(hw);
3322                 if (ret)
3323                         goto out;
3324                 ret = hw->phy.ops.reset(hw);
3325                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3326                         device_printf(dev,"Unsupported SFP+ module detected!");
3327                         printf(" Reload driver with supported module.\n");
3328                         adapter->sfp_probe = FALSE;
3329                         goto out;
3330                 } else
3331                         device_printf(dev,"SFP+ module detected!\n");
3332                 /* We now have supported optics */
3333                 adapter->sfp_probe = FALSE;
3334                 /* Set the optics type so system reports correctly */
3335                 ixgbe_setup_optics(adapter);
3336                 result = TRUE;
3337         }
3338 out:
3339         return (result);
3340 }
3341
3342 /*
3343 ** Tasklet handler for MSIX Link interrupts
3344 **  - do outside interrupt since it might sleep
3345 */
3346 static void
3347 ixgbe_handle_link(void *context, int pending)
3348 {
3349         struct adapter  *adapter = context;
3350
3351         ixgbe_check_link(&adapter->hw,
3352             &adapter->link_speed, &adapter->link_up, 0);
3353         ixgbe_update_link_status(adapter);
3354 }
3355
3356 /*
3357 ** Tasklet for handling SFP module interrupts
3358 */
3359 static void
3360 ixgbe_handle_mod(void *context, int pending)
3361 {
3362         struct adapter  *adapter = context;
3363         struct ixgbe_hw *hw = &adapter->hw;
3364         device_t        dev = adapter->dev;
3365         u32 err;
3366
3367         err = hw->phy.ops.identify_sfp(hw);
3368         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3369                 device_printf(dev,
3370                     "Unsupported SFP+ module type was detected.\n");
3371                 return;
3372         }
3373         err = hw->mac.ops.setup_sfp(hw);
3374         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3375                 device_printf(dev,
3376                     "Setup failure - unsupported SFP+ module type.\n");
3377                 return;
3378         }
3379         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
3380         return;
3381 }
3382
3383
3384 /*
3385 ** Tasklet for handling MSF (multispeed fiber) interrupts
3386 */
3387 static void
3388 ixgbe_handle_msf(void *context, int pending)
3389 {
3390         struct adapter  *adapter = context;
3391         struct ixgbe_hw *hw = &adapter->hw;
3392         u32 autoneg;
3393         bool negotiate;
3394         int err;
3395
3396         err = hw->phy.ops.identify_sfp(hw);
3397         if (!err) {
3398                 ixgbe_setup_optics(adapter);
3399                 INIT_DEBUGOUT1("ixgbe_sfp_probe: flags: %X\n", adapter->optics);
3400         }
3401
3402         autoneg = hw->phy.autoneg_advertised;
3403         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
3404                 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
3405         if (hw->mac.ops.setup_link)
3406                 hw->mac.ops.setup_link(hw, autoneg, TRUE);
3407
3408         ifmedia_removeall(&adapter->media);
3409         ixgbe_add_media_types(adapter);
3410         return;
3411 }
3412
3413 #ifdef IXGBE_FDIR
3414 /*
3415 ** Tasklet for reinitializing the Flow Director filter table
3416 */
3417 static void
3418 ixgbe_reinit_fdir(void *context, int pending)
3419 {
3420         struct adapter  *adapter = context;
3421         struct ifnet   *ifp = adapter->ifp;
3422
3423         if (adapter->fdir_reinit != 1) /* Shouldn't happen */
3424                 return;
3425         ixgbe_reinit_fdir_tables_82599(&adapter->hw);
3426         adapter->fdir_reinit = 0;
3427         /* re-enable flow director interrupts */
3428         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
3429         /* Restart the interface */
3430         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3431         return;
3432 }
3433 #endif
3434
3435 /**********************************************************************
3436  *
3437  *  Update the board statistics counters.
3438  *
3439  **********************************************************************/
3440 static void
3441 ixgbe_update_stats_counters(struct adapter *adapter)
3442 {
3443         struct ixgbe_hw *hw = &adapter->hw;
3444         u32 missed_rx = 0, bprc, lxon, lxoff, total;
3445         u64 total_missed_rx = 0;
3446
3447         adapter->stats.pf.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3448         adapter->stats.pf.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
3449         adapter->stats.pf.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
3450         adapter->stats.pf.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
3451
3452         /*
3453         ** Note: these are for the 8 possible traffic classes,
3454         **       which are unused in the current implementation,
3455         **       therefore only TC 0 should contain real data.
3456         */
3457         for (int i = 0; i < 8; i++) {
3458                 u32 mp;
3459                 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3460                 /* missed_rx tallies misses for the gprc workaround */
3461                 missed_rx += mp;
3462                 /* global total per queue */
3463                 adapter->stats.pf.mpc[i] += mp;
3464                 /* total for stats display */
3465                 total_missed_rx += adapter->stats.pf.mpc[i];
3466                 if (hw->mac.type == ixgbe_mac_82598EB) {
3467                         adapter->stats.pf.rnbc[i] +=
3468                             IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3469                         adapter->stats.pf.qbtc[i] +=
3470                             IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3471                         adapter->stats.pf.qbrc[i] +=
3472                             IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3473                         adapter->stats.pf.pxonrxc[i] +=
3474                             IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
3475                 } else
3476                         adapter->stats.pf.pxonrxc[i] +=
3477                             IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
3478                 adapter->stats.pf.pxontxc[i] +=
3479                     IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
3480                 adapter->stats.pf.pxofftxc[i] +=
3481                     IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
3482                 if (hw->mac.type != ixgbe_mac_X550EM_x)
3483                         adapter->stats.pf.pxoffrxc[i] +=
3484                             IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
3485                 adapter->stats.pf.pxon2offc[i] +=
3486                     IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
3487         }
3488         for (int i = 0; i < 16; i++) {
3489                 adapter->stats.pf.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3490                 adapter->stats.pf.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3491                 adapter->stats.pf.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3492         }
3493         adapter->stats.pf.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
3494         adapter->stats.pf.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
3495         adapter->stats.pf.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3496
3497         /* Hardware workaround, gprc counts missed packets */
3498         adapter->stats.pf.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3499         adapter->stats.pf.gprc -= missed_rx;
3500
3501         if (hw->mac.type != ixgbe_mac_82598EB) {
3502                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3503                     ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3504                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3505                     ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3506                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3507                     ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3508                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3509                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3510         } else {
3511                 adapter->stats.pf.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3512                 adapter->stats.pf.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3513                 /* 82598 only has a counter in the high register */
3514                 adapter->stats.pf.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3515                 adapter->stats.pf.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3516                 adapter->stats.pf.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3517         }
3518
3519         /*
3520          * Workaround: mprc hardware is incorrectly counting
3521          * broadcasts, so for now we subtract those.
3522          */
3523         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3524         adapter->stats.pf.bprc += bprc;
3525         adapter->stats.pf.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3526         if (hw->mac.type == ixgbe_mac_82598EB)
3527                 adapter->stats.pf.mprc -= bprc;
3528
3529         adapter->stats.pf.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3530         adapter->stats.pf.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3531         adapter->stats.pf.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3532         adapter->stats.pf.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3533         adapter->stats.pf.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3534         adapter->stats.pf.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3535
3536         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3537         adapter->stats.pf.lxontxc += lxon;
3538         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3539         adapter->stats.pf.lxofftxc += lxoff;
3540         total = lxon + lxoff;
3541
3542         adapter->stats.pf.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3543         adapter->stats.pf.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3544         adapter->stats.pf.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3545         adapter->stats.pf.gptc -= total;
3546         adapter->stats.pf.mptc -= total;
3547         adapter->stats.pf.ptc64 -= total;
3548         adapter->stats.pf.gotc -= total * ETHER_MIN_LEN;
3549
3550         adapter->stats.pf.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3551         adapter->stats.pf.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3552         adapter->stats.pf.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3553         adapter->stats.pf.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3554         adapter->stats.pf.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
3555         adapter->stats.pf.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
3556         adapter->stats.pf.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
3557         adapter->stats.pf.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3558         adapter->stats.pf.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
3559         adapter->stats.pf.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3560         adapter->stats.pf.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3561         adapter->stats.pf.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3562         adapter->stats.pf.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3563         adapter->stats.pf.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3564         adapter->stats.pf.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3565         adapter->stats.pf.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
3566         adapter->stats.pf.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
3567         adapter->stats.pf.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
3568         /* Only read FCOE stats on non-82598 devices */
3569         if (hw->mac.type != ixgbe_mac_82598EB) {
3570                 adapter->stats.pf.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
3571                 adapter->stats.pf.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
3572                 adapter->stats.pf.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
3573                 adapter->stats.pf.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
3574                 adapter->stats.pf.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
3575         }
3576
3577         /* Fill out the OS statistics structure */
3578         IXGBE_SET_IPACKETS(adapter, adapter->stats.pf.gprc);
3579         IXGBE_SET_OPACKETS(adapter, adapter->stats.pf.gptc);
3580         IXGBE_SET_IBYTES(adapter, adapter->stats.pf.gorc);
3581         IXGBE_SET_OBYTES(adapter, adapter->stats.pf.gotc);
3582         IXGBE_SET_IMCASTS(adapter, adapter->stats.pf.mprc);
3583         IXGBE_SET_OMCASTS(adapter, adapter->stats.pf.mptc);
3584         IXGBE_SET_COLLISIONS(adapter, 0);
3585         IXGBE_SET_IQDROPS(adapter, total_missed_rx);
3586         IXGBE_SET_IERRORS(adapter, adapter->stats.pf.crcerrs
3587             + adapter->stats.pf.rlec);
3588 }
3589
3590 #if __FreeBSD_version >= 1100036
3591 static uint64_t
3592 ixgbe_get_counter(struct ifnet *ifp, ift_counter cnt)
3593 {
3594         struct adapter *adapter;
3595
3596         adapter = if_getsoftc(ifp);
3597
3598         switch (cnt) {
3599         case IFCOUNTER_IPACKETS:
3600                 return (adapter->ipackets);
3601         case IFCOUNTER_OPACKETS:
3602                 return (adapter->opackets);
3603         case IFCOUNTER_IBYTES:
3604                 return (adapter->ibytes);
3605         case IFCOUNTER_OBYTES:
3606                 return (adapter->obytes);
3607         case IFCOUNTER_IMCASTS:
3608                 return (adapter->imcasts);
3609         case IFCOUNTER_OMCASTS:
3610                 return (adapter->omcasts);
3611         case IFCOUNTER_COLLISIONS:
3612                 return (0);
3613         case IFCOUNTER_IQDROPS:
3614                 return (adapter->iqdrops);
3615         case IFCOUNTER_IERRORS:
3616                 return (adapter->ierrors);
3617         default:
3618                 return (if_get_counter_default(ifp, cnt));
3619         }
3620 }
3621 #endif
3622
3623 /** ixgbe_sysctl_tdh_handler - Handler function
3624  *  Retrieves the TDH value from the hardware
3625  */
3626 static int 
3627 ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
3628 {
3629         int error;
3630
3631         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3632         if (!txr) return 0;
3633
3634         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
3635         error = sysctl_handle_int(oidp, &val, 0, req);
3636         if (error || !req->newptr)
3637                 return error;
3638         return 0;
3639 }
3640
3641 /** ixgbe_sysctl_tdt_handler - Handler function
3642  *  Retrieves the TDT value from the hardware
3643  */
3644 static int 
3645 ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
3646 {
3647         int error;
3648
3649         struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
3650         if (!txr) return 0;
3651
3652         unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
3653         error = sysctl_handle_int(oidp, &val, 0, req);
3654         if (error || !req->newptr)
3655                 return error;
3656         return 0;
3657 }
3658
3659 /** ixgbe_sysctl_rdh_handler - Handler function
3660  *  Retrieves the RDH value from the hardware
3661  */
3662 static int 
3663 ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
3664 {
3665         int error;
3666
3667         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3668         if (!rxr) return 0;
3669
3670         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
3671         error = sysctl_handle_int(oidp, &val, 0, req);
3672         if (error || !req->newptr)
3673                 return error;
3674         return 0;
3675 }
3676
3677 /** ixgbe_sysctl_rdt_handler - Handler function
3678  *  Retrieves the RDT value from the hardware
3679  */
3680 static int 
3681 ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
3682 {
3683         int error;
3684
3685         struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
3686         if (!rxr) return 0;
3687
3688         unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
3689         error = sysctl_handle_int(oidp, &val, 0, req);
3690         if (error || !req->newptr)
3691                 return error;
3692         return 0;
3693 }
3694
3695 static int
3696 ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
3697 {
3698         int error;
3699         struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
3700         unsigned int reg, usec, rate;
3701
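        /*
         * Recover the configured rate from the EITR interval field
         * (bits [11:3]); the driver's convention, matching
         * ixgbe_configure_ivars(), is rate = 500000 / usec.
         */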
3702         reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
3703         usec = ((reg & 0x0FF8) >> 3);
3704         if (usec > 0)
3705                 rate = 500000 / usec;
3706         else
3707                 rate = 0;
3708         error = sysctl_handle_int(oidp, &rate, 0, req);
3709         if (error || !req->newptr)
3710                 return error;
3711         reg &= ~0xfff; /* default, no limitation */
3712         ixgbe_max_interrupt_rate = 0;
3713         if (rate > 0 && rate < 500000) {
3714                 if (rate < 1000)
3715                         rate = 1000;
3716                 ixgbe_max_interrupt_rate = rate;
3717                 reg |= ((4000000/rate) & 0xff8 );
3718         }
3719         IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
3720         return 0;
3721 }
3722
3723 /*
3724  * Add sysctl variables, one per statistic, to the system.
3725  */
3726 static void
3727 ixgbe_add_hw_stats(struct adapter *adapter)
3728 {
3729         device_t dev = adapter->dev;
3730
3731         struct tx_ring *txr = adapter->tx_rings;
3732         struct rx_ring *rxr = adapter->rx_rings;
3733
3734         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
3735         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
3736         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
3737         struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3738
3739         struct sysctl_oid *stat_node, *queue_node;
3740         struct sysctl_oid_list *stat_list, *queue_list;
3741
3742 #define QUEUE_NAME_LEN 32
3743         char namebuf[QUEUE_NAME_LEN];
3744
3745         /* Driver Statistics */
3746         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
3747                         CTLFLAG_RD, &adapter->dropped_pkts,
3748                         "Driver dropped packets");
3749         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
3750                         CTLFLAG_RD, &adapter->mbuf_defrag_failed,
3751                         "m_defrag() failed");
3752         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
3753                         CTLFLAG_RD, &adapter->watchdog_events,
3754                         "Watchdog timeouts");
3755         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
3756                         CTLFLAG_RD, &adapter->vector_irq,
3757                         "Link MSIX IRQ Handled");
3758
3759         for (int i = 0; i < adapter->num_queues; i++, txr++) {
3760                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3761                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3762                                             CTLFLAG_RD, NULL, "Queue Name");
3763                 queue_list = SYSCTL_CHILDREN(queue_node);
3764
3765                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
3766                                 CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
3767                                 sizeof(&adapter->queues[i]),
3768                                 ixgbe_sysctl_interrupt_rate_handler, "IU",
3769                                 "Interrupt Rate");
3770                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
3771                                 CTLFLAG_RD, &(adapter->queues[i].irqs),
3772                                 "irqs on this queue");
3773                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
3774                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
3775                                 ixgbe_sysctl_tdh_handler, "IU",
3776                                 "Transmit Descriptor Head");
3777                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
3778                                 CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
3779                                 ixgbe_sysctl_tdt_handler, "IU",
3780                                 "Transmit Descriptor Tail");
3781                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
3782                                 CTLFLAG_RD, &txr->tso_tx,
3783                                 "TSO");
3784                 SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
3785                                 CTLFLAG_RD, &txr->no_tx_dma_setup,
3786                                 "Driver tx dma failure in xmit");
3787                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
3788                                 CTLFLAG_RD, &txr->no_desc_avail,
3789                                 "Queue No Descriptor Available");
3790                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
3791                                 CTLFLAG_RD, &txr->total_packets,
3792                                 "Queue Packets Transmitted");
3793         }
3794
3795         for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3796                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3797                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
3798                                             CTLFLAG_RD, NULL, "Queue Name");
3799                 queue_list = SYSCTL_CHILDREN(queue_node);
3800
3801                 struct lro_ctrl *lro = &rxr->lro;
3802
3803                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
3809                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
3810                                 ixgbe_sysctl_rdh_handler, "IU",
3811                                 "Receive Descriptor Head");
3812                 SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
3813                                 CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
3814                                 ixgbe_sysctl_rdt_handler, "IU",
3815                                 "Receive Descriptor Tail");
3816                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
3817                                 CTLFLAG_RD, &rxr->rx_packets,
3818                                 "Queue Packets Received");
3819                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
3820                                 CTLFLAG_RD, &rxr->rx_bytes,
3821                                 "Queue Bytes Received");
3822                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
3823                                 CTLFLAG_RD, &rxr->rx_copies,
3824                                 "Copied RX Frames");
3825                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
3826                                 CTLFLAG_RD, &lro->lro_queued, 0,
3827                                 "LRO Queued");
3828                 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
3829                                 CTLFLAG_RD, &lro->lro_flushed, 0,
3830                                 "LRO Flushed");
3831         }
3832
3833         /* MAC stats get their own sub node */
3834
3835         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
3836                                     CTLFLAG_RD, NULL, "MAC Statistics");
3837         stat_list = SYSCTL_CHILDREN(stat_node);
3838
3839         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
3840                         CTLFLAG_RD, &stats->crcerrs,
3841                         "CRC Errors");
3842         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
3843                         CTLFLAG_RD, &stats->illerrc,
3844                         "Illegal Byte Errors");
3845         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
3846                         CTLFLAG_RD, &stats->errbc,
3847                         "Byte Errors");
3848         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
3849                         CTLFLAG_RD, &stats->mspdc,
3850                         "MAC Short Packets Discarded");
3851         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
3852                         CTLFLAG_RD, &stats->mlfc,
3853                         "MAC Local Faults");
3854         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
3855                         CTLFLAG_RD, &stats->mrfc,
3856                         "MAC Remote Faults");
3857         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
3858                         CTLFLAG_RD, &stats->rlec,
3859                         "Receive Length Errors");
3860
3861         /* Flow Control stats */
3862         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
3863                         CTLFLAG_RD, &stats->lxontxc,
3864                         "Link XON Transmitted");
3865         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
3866                         CTLFLAG_RD, &stats->lxonrxc,
3867                         "Link XON Received");
3868         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
3869                         CTLFLAG_RD, &stats->lxofftxc,
3870                         "Link XOFF Transmitted");
3871         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
3872                         CTLFLAG_RD, &stats->lxoffrxc,
3873                         "Link XOFF Received");
3874
3875         /* Packet Reception Stats */
3876         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
3877                         CTLFLAG_RD, &stats->tor, 
3878                         "Total Octets Received"); 
3879         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
3880                         CTLFLAG_RD, &stats->gorc, 
3881                         "Good Octets Received"); 
3882         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
3883                         CTLFLAG_RD, &stats->tpr,
3884                         "Total Packets Received");
3885         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
3886                         CTLFLAG_RD, &stats->gprc,
3887                         "Good Packets Received");
3888         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
3889                         CTLFLAG_RD, &stats->mprc,
3890                         "Multicast Packets Received");
3891         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
3892                         CTLFLAG_RD, &stats->bprc,
3893                         "Broadcast Packets Received");
3894         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
3895                         CTLFLAG_RD, &stats->prc64,
3896                         "64 byte frames received ");
3897         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
3898                         CTLFLAG_RD, &stats->prc127,
3899                         "65-127 byte frames received");
3900         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
3901                         CTLFLAG_RD, &stats->prc255,
3902                         "128-255 byte frames received");
3903         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
3904                         CTLFLAG_RD, &stats->prc511,
3905                         "256-511 byte frames received");
3906         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
3907                         CTLFLAG_RD, &stats->prc1023,
3908                         "512-1023 byte frames received");
3909         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
3910                         CTLFLAG_RD, &stats->prc1522,
3911                         "1024-1522 byte frames received");
3912         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
3913                         CTLFLAG_RD, &stats->ruc,
3914                         "Receive Undersized");
3915         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
3916                         CTLFLAG_RD, &stats->rfc,
3917                         "Fragmented Packets Received ");
3918         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
3919                         CTLFLAG_RD, &stats->roc,
3920                         "Oversized Packets Received");
3921         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
3922                         CTLFLAG_RD, &stats->rjc,
3923                         "Received Jabber");
3924         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
3925                         CTLFLAG_RD, &stats->mngprc,
3926                         "Management Packets Received");
3927         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
3928                         CTLFLAG_RD, &stats->mngptc,
3929                         "Management Packets Dropped");
3930         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
3931                         CTLFLAG_RD, &stats->xec,
3932                         "Checksum Errors");
3933
3934         /* Packet Transmission Stats */
3935         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
3936                         CTLFLAG_RD, &stats->gotc, 
3937                         "Good Octets Transmitted"); 
3938         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
3939                         CTLFLAG_RD, &stats->tpt,
3940                         "Total Packets Transmitted");
3941         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
3942                         CTLFLAG_RD, &stats->gptc,
3943                         "Good Packets Transmitted");
3944         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
3945                         CTLFLAG_RD, &stats->bptc,
3946                         "Broadcast Packets Transmitted");
3947         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
3948                         CTLFLAG_RD, &stats->mptc,
3949                         "Multicast Packets Transmitted");
3950         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
3951                         CTLFLAG_RD, &stats->mngptc,
3952                         "Management Packets Transmitted");
3953         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
3954                         CTLFLAG_RD, &stats->ptc64,
3955                         "64 byte frames transmitted ");
3956         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
3957                         CTLFLAG_RD, &stats->ptc127,
3958                         "65-127 byte frames transmitted");
3959         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
3960                         CTLFLAG_RD, &stats->ptc255,
3961                         "128-255 byte frames transmitted");
3962         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
3963                         CTLFLAG_RD, &stats->ptc511,
3964                         "256-511 byte frames transmitted");
3965         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
3966                         CTLFLAG_RD, &stats->ptc1023,
3967                         "512-1023 byte frames transmitted");
3968         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
3969                         CTLFLAG_RD, &stats->ptc1522,
3970                         "1024-1522 byte frames transmitted");
3971 }
3972
3973 /*
3974 ** Set flow control using sysctl:
3975 ** Flow control values:
3976 **      0 - off
3977 **      1 - rx pause
3978 **      2 - tx pause
3979 **      3 - full
3980 */
3981 static int
3982 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
3983 {
3984         int error, last;
3985         struct adapter *adapter = (struct adapter *) arg1;
3986
3987         last = adapter->fc;
3988         error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
3989         if ((error) || (req->newptr == NULL))
3990                 return (error);
3991
3992         /* Don't bother if it's not changed */
3993         if (adapter->fc == last)
3994                 return (0);
3995
3996         switch (adapter->fc) {
3997                 case ixgbe_fc_rx_pause:
3998                 case ixgbe_fc_tx_pause:
3999                 case ixgbe_fc_full:
4000                         adapter->hw.fc.requested_mode = adapter->fc;
4001                         if (adapter->num_queues > 1)
4002                                 ixgbe_disable_rx_drop(adapter);
4003                         break;
4004                 case ixgbe_fc_none:
4005                         adapter->hw.fc.requested_mode = ixgbe_fc_none;
4006                         if (adapter->num_queues > 1)
4007                                 ixgbe_enable_rx_drop(adapter);
4008                         break;
4009                 default:
4010                         adapter->fc = last;
4011                         return (EINVAL);
4012         }
4013         /* Don't autoneg if forcing a value */
4014         adapter->hw.fc.disable_fc_autoneg = TRUE;
4015         ixgbe_fc_enable(&adapter->hw);
4016         return error;
4017 }
4018
4019 /*
4020 ** Control advertised link speed:
4021 **      Flags:
4022 **      0x1 - advertise 100 Mb
4023 **      0x2 - advertise 1G
4024 **      0x4 - advertise 10G
4025 */
4026 static int
4027 ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
4028 {
4029         int                     error = 0, requested;
4030         struct adapter          *adapter;
4031         device_t                dev;
4032         struct ixgbe_hw         *hw;
4033         ixgbe_link_speed        speed = 0;
4034
4035         adapter = (struct adapter *) arg1;
4036         dev = adapter->dev;
4037         hw = &adapter->hw;
4038
4039         requested = adapter->advertise;
4040         error = sysctl_handle_int(oidp, &requested, 0, req);
4041         if ((error) || (req->newptr == NULL))
4042                 return (error);
4043
4044         /* Checks to validate new value */
4045         if (adapter->advertise == requested) /* no change */
4046                 return (0);
4047
4048         if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
4049             (hw->phy.multispeed_fiber))) {
4050                 device_printf(dev,
4051                     "Advertised speed can only be set on copper or "
4052                     "multispeed fiber media types.\n");
4053                 return (EINVAL);
4054         }
4055
4056         if (requested < 0x1 || requested > 0x7) {
4057                 device_printf(dev,
4058                     "Invalid advertised speed; valid modes are 0x1 through 0x7\n");
4059                 return (EINVAL);
4060         }
4061
4062         if ((requested & 0x1)
4063             && (hw->mac.type != ixgbe_mac_X540)
4064             && (hw->mac.type != ixgbe_mac_X550)) {
4065                 device_printf(dev, "Set Advertise: 100Mb on X540/X550 only\n");
4066                 return (EINVAL);
4067         }
4068
4069         /* Set new value and report new advertised mode */
4070         if (requested & 0x1)
4071                 speed |= IXGBE_LINK_SPEED_100_FULL;
4072         if (requested & 0x2)
4073                 speed |= IXGBE_LINK_SPEED_1GB_FULL;
4074         if (requested & 0x4)
4075                 speed |= IXGBE_LINK_SPEED_10GB_FULL;
4076
4077         hw->mac.autotry_restart = TRUE;
4078         hw->mac.ops.setup_link(hw, speed, TRUE);
4079         adapter->advertise = requested;
4080
4081         return (error);
4082 }
4083
4084 /*
4085 ** Thermal Shutdown Trigger
4086 **   - cause a Thermal Overtemp IRQ
4087 **   - this now requires firmware enabling
4088 */
4089 static int
4090 ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
4091 {
4092         int             error, fire = 0;
4093         struct adapter  *adapter = (struct adapter *) arg1;
4094         struct ixgbe_hw *hw = &adapter->hw;
4095
4096
4097         if (hw->mac.type < ixgbe_mac_X540)
4098                 return (0);
4099
4100         error = sysctl_handle_int(oidp, &fire, 0, req);
4101         if ((error) || (req->newptr == NULL))
4102                 return (error);
4103
4104         if (fire) {
4105                 u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
4106                 reg |= IXGBE_EICR_TS;
4107                 IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
4108         }
4109
4110         return (0);
4111 }
4112
4113 /*
4114 ** Enable the hardware to drop packets when the buffer is
4115 ** full. This is useful with multiqueue, so that no single
4116 ** full queue stalls the entire RX engine. We only enable
4117 ** this when multiqueue is in use AND flow control is
4118 ** disabled.
4119 */
4120 static void
4121 ixgbe_enable_rx_drop(struct adapter *adapter)
4122 {
4123         struct ixgbe_hw *hw = &adapter->hw;
4124
4125         for (int i = 0; i < adapter->num_queues; i++) {
4126                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4127                 srrctl |= IXGBE_SRRCTL_DROP_EN;
4128                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4129         }
4130 }
4131
4132 static void
4133 ixgbe_disable_rx_drop(struct adapter *adapter)
4134 {
4135         struct ixgbe_hw *hw = &adapter->hw;
4136
4137         for (int i = 0; i < adapter->num_queues; i++) {
4138                 u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4139                 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
4140                 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4141         }
4142 }
4143
4144 static void
4145 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
4146 {
4147         u32 mask;
4148
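        /*
         * 82598 takes the whole queue bitmap in EICS; newer MACs split
         * the 64-bit 'queues' mask across EICS_EX(0) (low 32 bits) and
         * EICS_EX(1) (high 32 bits).
         */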
4149         switch (adapter->hw.mac.type) {
4150         case ixgbe_mac_82598EB:
4151                 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
4152                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
4153                 break;
4154         case ixgbe_mac_82599EB:
4155         case ixgbe_mac_X540:
4156         case ixgbe_mac_X550:
4157                 mask = (queues & 0xFFFFFFFF);
4158                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
4159                 mask = (queues >> 32);
4160                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
4161                 break;
4162         default:
4163                 break;
4164         }
4165 }
4166
4167