/******************************************************************************

  Copyright (c) 2001-2009, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include "ixgbe.h"

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "1.7.4";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
        "Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixgbe_watchdog(struct adapter *);
static void     ixgbe_init(void *);
static void     ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int      ixgbe_allocate_queues(struct adapter *);
#if __FreeBSD_version >= 602105
static int      ixgbe_setup_msix(struct adapter *);
#endif
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static int      ixgbe_hardware_init(struct adapter *);
static void     ixgbe_setup_interface(device_t, struct adapter *);

static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int      ixgbe_setup_transmit_structures(struct adapter *);
static void     ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int      ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);

static void     ixgbe_init_moderation(struct adapter *);
static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static bool     ixgbe_txeof(struct tx_ring *);
static bool     ixgbe_rxeof(struct rx_ring *, int);
static void     ixgbe_rx_checksum(u32, struct mbuf *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_disable_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_print_hw_stats(struct adapter *);
static void     ixgbe_print_debug_info(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static int      ixgbe_get_buf(struct rx_ring *, int, u8);
static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int      ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixgbe_dma_malloc(struct adapter *, bus_size_t,
                    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void     ixgbe_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static int      ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixgbe_set_ivar(struct adapter *, u16, u8, s8);
static void     ixgbe_configure_ivars(struct adapter *);
static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

#ifdef IXGBE_HW_VLAN_SUPPORT
static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);
#endif

#ifdef IXGBE_TIMESYNC
/* Precision Time sync support */
static int ixgbe_tsync_init(struct adapter *);
static void ixgbe_tsync_disable(struct adapter *);
#endif

static void     ixgbe_update_aim(struct rx_ring *);

/* Support for pluggable optic modules */
static bool     ixgbe_sfp_probe(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void     ixgbe_legacy_irq(void *);

#if __FreeBSD_version >= 602105
/* The MSI/X Interrupt handlers */
static void     ixgbe_msix_tx(void *);
static void     ixgbe_msix_rx(void *);
static void     ixgbe_msix_link(void *);
#endif

/* Deferred interrupt tasklets */
static void     ixgbe_handle_tx(void *, int);
static void     ixgbe_handle_rx(void *, int);
static void     ixgbe_handle_link(void *, int);
static void     ixgbe_handle_msf(void *, int);
static void     ixgbe_handle_mod(void *, int);


/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixgbe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixgbe_probe),
        DEVMETHOD(device_attach, ixgbe_attach),
        DEVMETHOD(device_detach, ixgbe_detach),
        DEVMETHOD(device_shutdown, ixgbe_shutdown),
        {0, 0}
};

static driver_t ixgbe_driver = {
        "ix", ixgbe_methods, sizeof(struct adapter),
};

static devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);

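/*
** Illustrative note (not part of the original comments): DRIVER_MODULE()
** above registers this driver as the "ixgbe" module, so when it is built
** as a loadable module it can be brought in at boot from
** /boot/loader.conf, e.g.:
**
**   ixgbe_load="YES"
*/
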
/*
** TUNEABLE PARAMETERS:
*/

/*
** These parameters are used in Adaptive
** Interrupt Moderation. The value is written
** to the EITR register and controls the
** interrupt frequency. They can be modified,
** but tune them with care.
*/
static int ixgbe_enable_aim = TRUE;
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
static int ixgbe_low_latency = IXGBE_LOW_LATENCY;
TUNABLE_INT("hw.ixgbe.low_latency", &ixgbe_low_latency);
static int ixgbe_ave_latency = IXGBE_AVE_LATENCY;
TUNABLE_INT("hw.ixgbe.ave_latency", &ixgbe_ave_latency);
static int ixgbe_bulk_latency = IXGBE_BULK_LATENCY;
TUNABLE_INT("hw.ixgbe.bulk_latency", &ixgbe_bulk_latency);

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 100;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
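/*
** Illustrative note: the value is handed to the shared code as an
** ixgbe_fc_* enum constant (conventionally 0 = none, 1 = rx pause,
** 2 = tx pause, 3 = full; verify against ixgbe_type.h), so flow
** control could for example be disabled in /boot/loader.conf with:
**
**   hw.ixgbe.flow_control=0
*/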

/*
 * Should the driver do LRO on the RX end?
 *  This can be toggled on the fly, but the
 *  interface must be reset (down/up) for it
 *  to take effect.
 */
static int ixgbe_enable_lro = 1;
TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro);

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
#if __FreeBSD_version >= 602105
static int ixgbe_enable_msix = 1;
#else
static int ixgbe_enable_msix = 0;
#endif
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);

/*
 * Enable RX Header Split
 *   WARNING: disable this if bridging or forwarding!!
 */
static int ixgbe_rx_hdr_split = 1;
TUNABLE_INT("hw.ixgbe.rx_hdr_split", &ixgbe_rx_hdr_split);

/*
 * Number of TX/RX Queues; a value of 0
 * autoconfigures to the number of CPUs.
 */
static int ixgbe_tx_queues = 1;
TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);
static int ixgbe_rx_queues = 1;
TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);

/* Number of TX descriptors per ring */
static int ixgbe_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
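
/*
** Illustrative example: the TUNABLE_INT knobs above are boot-time
** tunables, typically set in /boot/loader.conf. The values below
** are placeholders, not recommendations:
**
**   hw.ixgbe.enable_aim=0
**   hw.ixgbe.enable_msix=0
**   hw.ixgbe.rx_process_limit=256
**   hw.ixgbe.txd=2048
**   hw.ixgbe.rxd=2048
*/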

/* Total number of Interfaces - needed for config sanity check */
static int ixgbe_total_ports;

/*
** The number of scatter-gather segments
** differs for 82598 and 82599, default to
** the former.
*/
static int ixgbe_num_segs = IXGBE_82598_SCATTER;

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  the adapter based on its PCI vendor/device ID.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
        ixgbe_vendor_info_t *ent;

        u16     pci_vendor_id = 0;
        u16     pci_device_id = 0;
        u16     pci_subvendor_id = 0;
        u16     pci_subdevice_id = 0;
        char    adapter_name[256];

        INIT_DEBUGOUT("ixgbe_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixgbe_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&

                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&

                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        snprintf(adapter_name, sizeof(adapter_name),
                            "%s, Version - %s", ixgbe_strings[ent->index],
                            ixgbe_driver_version);
                        device_set_desc_copy(dev, adapter_name);
                        ++ixgbe_total_ports;
                        return (0);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
        struct adapter *adapter;
        struct ixgbe_hw *hw;
        int             error = 0;
        u16             pci_device_id;
        u32             ctrl_ext;

        INIT_DEBUGOUT("ixgbe_attach: begin");

        /* Allocate, clear, and link in our adapter structure */
        adapter = device_get_softc(dev);
        adapter->dev = adapter->osdep.dev = dev;
        hw = &adapter->hw;

        /* Core Lock Init */
        IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

        /* Keep track of optics */
        pci_device_id = pci_get_device(dev);
        switch (pci_device_id) {
                case IXGBE_DEV_ID_82598_CX4_DUAL_PORT :
                case IXGBE_DEV_ID_82598EB_CX4 :
                        adapter->optics = IFM_10G_CX4;
                        break;
                case IXGBE_DEV_ID_82598AF_DUAL_PORT :
                case IXGBE_DEV_ID_82598_DA_DUAL_PORT :
                case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
                case IXGBE_DEV_ID_82598AT :
                        adapter->optics = IFM_10G_SR;
                        break;
                case IXGBE_DEV_ID_82598EB_XF_LR :
                        adapter->optics = IFM_10G_LR;
                        break;
                case IXGBE_DEV_ID_82599_SFP :
                        adapter->optics = IFM_10G_SR;
                        ixgbe_num_segs = IXGBE_82599_SCATTER;
                        break;
                case IXGBE_DEV_ID_82599_KX4 :
                        adapter->optics = IFM_10G_CX4;
                        ixgbe_num_segs = IXGBE_82599_SCATTER;
                        break;
                default:
                        break;
        }

        /* SYSCTL APIs */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
                        &ixgbe_enable_lro, 1, "Large Receive Offload");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
                        &ixgbe_enable_aim, 1, "Interrupt Moderation");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "low_latency", CTLTYPE_INT|CTLFLAG_RW,
                        &ixgbe_low_latency, 1, "Low Latency");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "ave_latency", CTLTYPE_INT|CTLFLAG_RW,
                        &ixgbe_ave_latency, 1, "Average Latency");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "bulk_latency", CTLTYPE_INT|CTLFLAG_RW,
                        &ixgbe_bulk_latency, 1, "Bulk Latency");

        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "hdr_split", CTLTYPE_INT|CTLFLAG_RW,
                        &ixgbe_rx_hdr_split, 1, "RX Header Split");

        /* Set up the timer callout */
        callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

        /* Determine hardware revision */
        ixgbe_identify_hardware(adapter);

        /* Do base PCI setup - map BAR0 */
        if (ixgbe_allocate_pci_resources(adapter)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_out;
        }

        /* Do descriptor calc and sanity checks */
        if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
                device_printf(dev, "TXD config issue, using default!\n");
                adapter->num_tx_desc = DEFAULT_TXD;
        } else
                adapter->num_tx_desc = ixgbe_txd;

        /*
        ** With many RX rings it is easy to exceed the
        ** system mbuf allocation. Tuning nmbclusters
        ** can alleviate this.
        */
        if ((adapter->num_rx_queues > 1) && (nmbclusters > 0)) {
                int s;
                /* Calculate the total RX mbuf needs */
                s = (ixgbe_rxd * adapter->num_rx_queues) * ixgbe_total_ports;
                if (s > nmbclusters) {
                        device_printf(dev, "RX Descriptors exceed "
                            "system mbuf max, using default instead!\n");
                        ixgbe_rxd = DEFAULT_RXD;
                }
        }

        if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
                device_printf(dev, "RXD config issue, using default!\n");
                adapter->num_rx_desc = DEFAULT_RXD;
        } else
                adapter->num_rx_desc = ixgbe_rxd;

        /* Allocate our TX/RX Queues */
        if (ixgbe_allocate_queues(adapter)) {
                error = ENOMEM;
                goto err_out;
        }

        /* Initialize the shared code */
        error = ixgbe_init_shared_code(hw);
        if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
                /*
                ** No optics in this port, set up
                ** so the timer routine will probe
                ** for later insertion.
                */
                adapter->sfp_probe = TRUE;
                error = 0;
        } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                device_printf(dev,"Unsupported SFP+ module detected!\n");
                error = EIO;
                goto err_late;
        } else if (error) {
                device_printf(dev,"Unable to initialize the shared code\n");
                error = EIO;
                goto err_late;
        }

        /* Initialize the hardware */
        if (ixgbe_hardware_init(adapter)) {
                device_printf(dev,"Unable to initialize the hardware\n");
                error = EIO;
                goto err_late;
        }

        if ((adapter->msix > 1) && (ixgbe_enable_msix))
                error = ixgbe_allocate_msix(adapter);
        else
                error = ixgbe_allocate_legacy(adapter);
        if (error)
                goto err_late;

        /* Setup OS specific network interface */
        ixgbe_setup_interface(dev, adapter);

        /* Sysctl for limiting the amount of work done in the taskqueue */
        ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
            "max number of rx packets to process", &adapter->rx_process_limit,
            ixgbe_rx_process_limit);

        /* Initialize statistics */
        ixgbe_update_stats_counters(adapter);

#ifdef IXGBE_HW_VLAN_SUPPORT
        /* Register for VLAN events */
        adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixgbe_register_vlan, 0, EVENTHANDLER_PRI_FIRST);
        adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixgbe_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST);
#endif

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        INIT_DEBUGOUT("ixgbe_attach: end");
        return (0);
err_late:
        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);
err_out:
        ixgbe_free_pci_resources(adapter);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        struct tx_ring *txr = adapter->tx_rings;
        struct rx_ring *rxr = adapter->rx_rings;
        u32     ctrl_ext;

        INIT_DEBUGOUT("ixgbe_detach: begin");

        /* Make sure VLANS are not using driver */
#if __FreeBSD_version >= 700000
        if (adapter->ifp->if_vlantrunk != NULL) {
#else
        if (adapter->ifp->if_nvlans != 0) {
#endif
                device_printf(dev,"Vlan in use, detach first\n");
                return (EBUSY);
        }

        IXGBE_CORE_LOCK(adapter);
        ixgbe_stop(adapter);
        IXGBE_CORE_UNLOCK(adapter);

        for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
                if (txr->tq) {
                        taskqueue_drain(txr->tq, &txr->tx_task);
                        taskqueue_free(txr->tq);
                }
        }

        for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
                if (rxr->tq) {
                        taskqueue_drain(rxr->tq, &rxr->rx_task);
                        taskqueue_free(rxr->tq);
                }
        }

        /* Drain the Link queue */
        if (adapter->tq) {
                taskqueue_drain(adapter->tq, &adapter->link_task);
                taskqueue_drain(adapter->tq, &adapter->mod_task);
                taskqueue_drain(adapter->tq, &adapter->msf_task);
                taskqueue_free(adapter->tq);
        }

        /* let hardware know driver is unloading */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

#ifdef IXGBE_HW_VLAN_SUPPORT
        /* Unregister VLAN events */
        if (adapter->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
        if (adapter->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
#endif

        ether_ifdetach(adapter->ifp);
        callout_drain(&adapter->timer);
        ixgbe_free_pci_resources(adapter);
        bus_generic_detach(dev);
        if_free(adapter->ifp);

        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);

        IXGBE_CORE_LOCK_DESTROY(adapter);
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        IXGBE_CORE_LOCK(adapter);
        ixgbe_stop(adapter);
        IXGBE_CORE_UNLOCK(adapter);
        return (0);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
{
        struct mbuf    *m_head;
        struct adapter *adapter = txr->adapter;

        IXGBE_TX_LOCK_ASSERT(txr);

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;
        if (!adapter->link_active)
                return;

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;
                /*
                 * Force a cleanup if number of TX descriptors
                 * available is below the threshold. If it fails
                 * to get above, then abort transmit.
                 */
                if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
                        ixgbe_txeof(txr);
                        /* Make sure things have improved */
                        if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
                                txr->no_tx_desc_avail++;
                                break;
                        }
                }

                if (ixgbe_xmit(txr, &m_head)) {
                        if (m_head == NULL)
                                break;
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                        break;
                }
                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);

                /* Set timeout in case hardware has problems transmitting */
                txr->watchdog_timer = IXGBE_TX_TIMEOUT;

        }
        return;
}


static void
ixgbe_start(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring  *txr = adapter->tx_rings;
        u32 queue = 0;

        /*
        ** This is really just here for testing
        ** TX multiqueue, ultimately what is
        ** needed is the flow support in the stack
        ** and appropriate logic here to deal with
        ** it. -jfv
        */
        if (adapter->num_tx_queues > 1)
                queue = (curcpu % adapter->num_tx_queues);

        txr = &adapter->tx_rings[queue];

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                if (IXGBE_TX_TRYLOCK(txr) == 0)
                        return;
                ixgbe_start_locked(txr, ifp);
                IXGBE_TX_UNLOCK(txr);
        }
        return;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
        int             error = 0;
        struct ifreq   *ifr = (struct ifreq *) data;
        struct ifaddr   *ifa = (struct ifaddr *) data;
        struct adapter *adapter = ifp->if_softc;

        switch (command) {
        case SIOCSIFADDR:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                IXGBE_CORE_LOCK(adapter);
                                ixgbe_init_locked(adapter);
                                IXGBE_CORE_UNLOCK(adapter);
                        }
                        arp_ifinit(ifp, ifa);
                } else
                        ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
                        error = EINVAL;
                } else {
                        IXGBE_CORE_LOCK(adapter);
                        ifp->if_mtu = ifr->ifr_mtu;
                        adapter->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                        ixgbe_init_locked(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                IXGBE_CORE_LOCK(adapter);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ adapter->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        ixgbe_disable_promisc(adapter);
                                        ixgbe_set_promisc(adapter);
                                }
                        } else
                                ixgbe_init_locked(adapter);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixgbe_stop(adapter);
                adapter->if_flags = ifp->if_flags;
                IXGBE_CORE_UNLOCK(adapter);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixgbe_disable_intr(adapter);
                        ixgbe_set_multi(adapter);
                        ixgbe_enable_intr(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        ixgbe_init(adapter);
#if __FreeBSD_version >= 700000
                VLAN_CAPABILITIES(ifp);
#endif
                break;
        }
#ifdef IXGBE_TIMESYNC
        /*
        ** IOCTL support for Precision Time (IEEE 1588)
        */
        case IXGBE_TIMESYNC_READTS:
            {
                u32 rx_ctl, tx_ctl;
                struct ixgbe_tsync_read *tdata;

                tdata = (struct ixgbe_tsync_read *) ifr->ifr_data;

                if (tdata->read_current_time) {
                        getnanotime(&tdata->system_time);
                        tdata->network_time = IXGBE_READ_REG(&adapter->hw,
                            IXGBE_SYSTIML);
                        tdata->network_time |=
                            (u64)IXGBE_READ_REG(&adapter->hw,
                            IXGBE_SYSTIMH ) << 32;
                }

                rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
                tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);

                if (rx_ctl & 0x1) {
                        u32 tmp;
                        unsigned char *tmp_cp;

                        tdata->rx_valid = 1;
                        tdata->rx_stamp = IXGBE_READ_REG(&adapter->hw,
                            IXGBE_RXSTMPL);
                        tdata->rx_stamp |= (u64)IXGBE_READ_REG(&adapter->hw,
                            IXGBE_RXSTMPH) << 32;

                        tmp = IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRL);
                        tmp_cp = (unsigned char *) &tmp;
                        tdata->srcid[0] = tmp_cp[0];
                        tdata->srcid[1] = tmp_cp[1];
                        tdata->srcid[2] = tmp_cp[2];
                        tdata->srcid[3] = tmp_cp[3];
                        tmp = IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRH);
                        tmp_cp = (unsigned char *) &tmp;
                        tdata->srcid[4] = tmp_cp[0];
                        tdata->srcid[5] = tmp_cp[1];
                        tdata->seqid = tmp >> 16;
                        tdata->seqid = htons(tdata->seqid);
                } else
                        tdata->rx_valid = 0;

                if (tx_ctl & 0x1) {
                        tdata->tx_valid = 1;
                        tdata->tx_stamp = IXGBE_READ_REG(&adapter->hw,
                            IXGBE_TXSTMPL);
                        tdata->tx_stamp |= (u64) IXGBE_READ_REG(&adapter->hw,
                            IXGBE_TXSTMPH) << 32;
                } else
                        tdata->tx_valid = 0;

                return (0);
            }
#endif  /* IXGBE_TIMESYNC */

        default:
                IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

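/*
** Illustrative usage: the SIOCSIFCAP case above is the path taken
** when ifconfig(8) toggles offloads; e.g., assuming the interface
** is ix0, the following exercise the HWCSUM and TSO4 mask bits:
**
**   ifconfig ix0 -txcsum -rxcsum
**   ifconfig ix0 -tso
*/
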
/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called by the local timer
 *  to detect hardware hangs.
 *
 **********************************************************************/

static void
ixgbe_watchdog(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        struct tx_ring *txr = adapter->tx_rings;
        struct ixgbe_hw *hw = &adapter->hw;
        bool            tx_hang = FALSE;

        IXGBE_CORE_LOCK_ASSERT(adapter);

        /*
         * The timer is set to 5 every time ixgbe_start() queues a packet.
         * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
         * least one descriptor.
         * Finally, anytime all descriptors are clean the timer is
         * set to 0.
         */
        for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
                u32 head, tail;

                IXGBE_TX_LOCK(txr);
                if (txr->watchdog_timer == 0 || --txr->watchdog_timer) {
                        IXGBE_TX_UNLOCK(txr);
                        continue;
                } else {
                        head = IXGBE_READ_REG(hw, IXGBE_TDH(i));
                        tail = IXGBE_READ_REG(hw, IXGBE_TDT(i));
                        if (head == tail) { /* last minute check */
                                IXGBE_TX_UNLOCK(txr);
                                continue;
                        }
                        /* Well, seems something is really hung */
                        tx_hang = TRUE;
                        IXGBE_TX_UNLOCK(txr);
                        break;
                }
        }
        if (tx_hang == FALSE)
                return;

        /*
         * If we are in this routine because of pause frames, then don't
         * reset the hardware.
         */
        if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
                txr = adapter->tx_rings;        /* reset pointer */
                for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
                        IXGBE_TX_LOCK(txr);
                        txr->watchdog_timer = IXGBE_TX_TIMEOUT;
                        IXGBE_TX_UNLOCK(txr);
                }
                return;
        }

        device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
        txr = adapter->tx_rings;        /* reset pointer */
        for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
                device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
                    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
                    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
                device_printf(dev,"TX(%d) desc avail = %d, "
                    "Next TX to Clean = %d\n",
                    i, txr->tx_avail, txr->next_tx_to_clean);
        }
        adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        adapter->watchdog_events++;

        ixgbe_init_locked(adapter);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
        struct ifnet   *ifp = adapter->ifp;
        device_t        dev = adapter->dev;
        struct ixgbe_hw *hw;
        u32             k, txdctl, mhadd, gpie;
        u32             rxdctl, rxctrl;
        int             err;

        INIT_DEBUGOUT("ixgbe_init: begin");

        hw = &adapter->hw;
        mtx_assert(&adapter->core_mtx, MA_OWNED);

        ixgbe_stop(adapter);

        /* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              IXGBE_ETH_LENGTH_OF_ADDRESS);
        ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
        adapter->hw.addr_ctrl.rar_used_count = 1;

        /* Initialize the hardware */
        if (ixgbe_hardware_init(adapter)) {
                device_printf(dev, "Unable to initialize the hardware\n");
                return;
        }

#ifndef IXGBE_HW_VLAN_SUPPORT
        if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
                u32        ctrl;

                ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
                ctrl |= IXGBE_VLNCTRL_VME;
                ctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
        }
#endif
        /* Prepare transmit descriptors and buffers */
        if (ixgbe_setup_transmit_structures(adapter)) {
                device_printf(dev,"Could not setup transmit structures\n");
                ixgbe_stop(adapter);
                return;
        }

        ixgbe_initialize_transmit_units(adapter);

        /* Setup Multicast table */
        ixgbe_set_multi(adapter);

        /*
        ** Determine the correct mbuf pool
        ** for doing jumbo/headersplit
        */
        if (ifp->if_mtu > ETHERMTU)
                adapter->rx_mbuf_sz = MJUMPAGESIZE;
        else
                adapter->rx_mbuf_sz = MCLBYTES;

        /* Prepare receive descriptors and buffers */
        if (ixgbe_setup_receive_structures(adapter)) {
                device_printf(dev,"Could not setup receive structures\n");
                ixgbe_stop(adapter);
                return;
        }

        /* Configure RX settings */
        ixgbe_initialize_receive_units(adapter);

        /* Configure Interrupt Moderation */
        ixgbe_init_moderation(adapter);

        gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                gpie |= IXGBE_SDP1_GPIEN;
                gpie |= IXGBE_SDP2_GPIEN;
        }

        /* Enable Fan Failure Interrupt */
        if (hw->device_id == IXGBE_DEV_ID_82598AT)
                gpie |= IXGBE_SDP1_GPIEN;

        if (adapter->msix > 2) {
                /* Enable Enhanced MSIX mode */
                gpie |= IXGBE_GPIE_MSIX_MODE;
                gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
                    IXGBE_GPIE_OCD;
        }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_TSO;
        else if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);

        /* Set MTU size */
        if (ifp->if_mtu > ETHERMTU) {
                mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
                mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
        }

        /* Now enable all the queues */

        for (int i = 0; i < adapter->num_tx_queues; i++) {
                txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                /* Set WTHRESH to 8, burst writeback */
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
        }

        for (int i = 0; i < adapter->num_rx_queues; i++) {
                rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
                /* PTHRESH set to 32 */
                rxdctl |= 0x0020;
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
                for (k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                break;
                        else
                                msec_delay(1);
                }
                wmb();
                IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
        }

        /* Enable Receive engine */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                rxctrl |= IXGBE_RXCTRL_DMBYPS;
        rxctrl |= IXGBE_RXCTRL_RXEN;
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);

        callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

        /* Set up MSI/X routing */
        if (ixgbe_enable_msix)
                ixgbe_configure_ivars(adapter);
        else {  /* Simple settings for Legacy/MSI */
                ixgbe_set_ivar(adapter, 0, 0, 0);
                ixgbe_set_ivar(adapter, 0, 0, 1);
        }

        ixgbe_enable_intr(adapter);

        /*
        ** Check on any SFP devices that
        ** need to be kick-started
        */
        err = hw->phy.ops.identify(hw);
        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                device_printf(dev,
                    "Unsupported SFP+ module type was detected.\n");
                ixgbe_detach(dev);
                return;
        }
        if (ixgbe_is_sfp(hw)) {
                if (hw->phy.multispeed_fiber) {
                        hw->mac.ops.setup_sfp(hw);
                        taskqueue_enqueue(adapter->tq, &adapter->msf_task);
                } else
                        taskqueue_enqueue(adapter->tq, &adapter->mod_task);
        } else
                taskqueue_enqueue(adapter->tq, &adapter->link_task);


#ifdef IXGBE_TIMESYNC
        /* Initialize IEEE 1588 support */
        if (adapter->hw.mac.type == ixgbe_mac_82599EB)
                ixgbe_tsync_init(adapter);
#endif

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        return;
}

static void
ixgbe_init(void *arg)
{
        struct adapter *adapter = arg;

        IXGBE_CORE_LOCK(adapter);
        ixgbe_init_locked(adapter);
        IXGBE_CORE_UNLOCK(adapter);
        return;
}


/*
** MSIX Interrupt Tasklets
*/

static void
ixgbe_handle_rx(void *context, int pending)
{
        struct rx_ring  *rxr = context;
        struct adapter  *adapter = rxr->adapter;
        u32             loop = MAX_LOOP;
        bool            more;

        do {
                more = ixgbe_rxeof(rxr, -1);
        } while (loop-- && more);
        /* Reenable this interrupt */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
}

static void
ixgbe_handle_tx(void *context, int pending)
{
        struct tx_ring  *txr = context;
        struct adapter  *adapter = txr->adapter;
        struct ifnet    *ifp = adapter->ifp;
        u32             loop = MAX_LOOP;
        bool            more;

        IXGBE_TX_LOCK(txr);
        do {
                more = ixgbe_txeof(txr);
        } while (loop-- && more);

        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                ixgbe_start_locked(txr, ifp);

        IXGBE_TX_UNLOCK(txr);

        /* Reenable this interrupt */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
}


/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
        struct adapter  *adapter = arg;
        struct ixgbe_hw *hw = &adapter->hw;
        struct          tx_ring *txr = adapter->tx_rings;
        struct          rx_ring *rxr = adapter->rx_rings;
        bool            more;
        u32             reg_eicr, loop = MAX_LOOP;


        reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

        if (reg_eicr == 0) {
                ixgbe_enable_intr(adapter);
                return;
        }

        if (ixgbe_rxeof(rxr, adapter->rx_process_limit))
                taskqueue_enqueue(rxr->tq, &rxr->rx_task);

        IXGBE_TX_LOCK(txr);
        ++txr->tx_irq;
        do {
                more = ixgbe_txeof(txr);
        } while (loop-- && more);
        IXGBE_TX_UNLOCK(txr);

        if (more)
                taskqueue_enqueue(txr->tq, &txr->tx_task);

        /* Check for fan failure */
        if ((hw->phy.media_type == ixgbe_media_type_copper) &&
            (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
                    "REPLACE IMMEDIATELY!!\n");
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
        }

        /* Link status change */
        if (reg_eicr & IXGBE_EICR_LSC) {
                ixgbe_check_link(&adapter->hw,
                    &adapter->link_speed, &adapter->link_up, 0);
                ixgbe_update_link_status(adapter);
        }

        /* Update interrupt rate */
        if (ixgbe_enable_aim == TRUE)
                ixgbe_update_aim(rxr);

        ixgbe_enable_intr(adapter);
        return;
}


#if __FreeBSD_version >= 602105
/*********************************************************************
 *
 *  MSIX TX Interrupt Service routine
 *
 **********************************************************************/
static void
ixgbe_msix_tx(void *arg)
{
        struct tx_ring  *txr = arg;
        struct adapter  *adapter = txr->adapter;
        bool            more;

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, txr->eims);

        IXGBE_TX_LOCK(txr);
        ++txr->tx_irq;
        more = ixgbe_txeof(txr);
        IXGBE_TX_UNLOCK(txr);
        if (more)
                taskqueue_enqueue(txr->tq, &txr->tx_task);
        else /* Reenable this interrupt */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims);
        return;
}


/*********************************************************************
 *
 *  MSIX RX Interrupt Service routine
 *
 **********************************************************************/
static void
ixgbe_msix_rx(void *arg)
{
        struct rx_ring  *rxr = arg;
        struct adapter  *adapter = rxr->adapter;
        bool            more;

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims);

        ++rxr->rx_irq;
        more = ixgbe_rxeof(rxr, adapter->rx_process_limit);

        /* Update interrupt rate */
        if (ixgbe_enable_aim == TRUE)
                ixgbe_update_aim(rxr);

        if (more)
                taskqueue_enqueue(rxr->tq, &rxr->rx_task);
        else
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims);
        return;
}


static void
ixgbe_msix_link(void *arg)
{
        struct adapter  *adapter = arg;
        struct ixgbe_hw *hw = &adapter->hw;
        u32             reg_eicr;

        ++adapter->link_irq;

        /* First get the cause */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, IXGBE_EIMS_OTHER);
        reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
        /* Clear with write */
        IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

        /* Link status change */
        if (reg_eicr & IXGBE_EICR_LSC)
                taskqueue_enqueue(adapter->tq, &adapter->link_task);

        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                if (reg_eicr & IXGBE_EICR_ECC) {
                        device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
                            "Please Reboot!!\n");
                        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
                }
                if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
                        /* Clear the interrupt */
                        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
                        taskqueue_enqueue(adapter->tq, &adapter->msf_task);
                } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
                        /* Clear the interrupt */
                        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
                        taskqueue_enqueue(adapter->tq, &adapter->mod_task);
                }
        }

        /* Check for fan failure */
        if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
            (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
                    "REPLACE IMMEDIATELY!!\n");
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
        }

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
        return;
}
#endif /* __FreeBSD_version >= 602105 */

1423 /*
1424 ** Routine to adjust the RX EITR value based on traffic;
1425 ** it's a simple three-state model, but it seems to help.
1426 **
1427 ** Note that the three EITR values are tunable in real
1428 ** time using sysctl. The feature can be effectively
1429 ** nullified by setting them all equal.
1430 */
1431 #define BULK_THRESHOLD  10000
1432 #define AVE_THRESHOLD   1600
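
/*
 * Sketch of the three-state machine implemented below; rxr->bytes is
 * accumulated per interrupt interval and cleared each time it is
 * sampled here:
 *
 *      low_latency  --(bytes > AVE_THRESHOLD)-->   ave_latency
 *      ave_latency  --(bytes < AVE_THRESHOLD)-->   low_latency
 *      ave_latency  --(bytes > BULK_THRESHOLD)-->  bulk_latency
 *      bulk_latency --(bytes < BULK_THRESHOLD)-->  ave_latency
 *
 * On a state change the new interval is mirrored into both the low and
 * high 16 bits of the queue's EITR register (newitr | newitr << 16).
 */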
1433
1434 static void
1435 ixgbe_update_aim(struct rx_ring *rxr)
1436 {
1437         struct adapter  *adapter = rxr->adapter;
1438         u32             olditr, newitr;
1439
1440         /* Update interrupt moderation based on traffic */
1441         olditr = rxr->eitr_setting;
1442         newitr = olditr;
1443
1444         /* Idle, don't change setting */
1445         if (rxr->bytes == 0)   
1446                 return;
1447                 
1448         if (olditr == ixgbe_low_latency) {
1449                 if (rxr->bytes > AVE_THRESHOLD)
1450                         newitr = ixgbe_ave_latency;
1451         } else if (olditr == ixgbe_ave_latency) {
1452                 if (rxr->bytes < AVE_THRESHOLD)
1453                         newitr = ixgbe_low_latency;
1454                 else if (rxr->bytes > BULK_THRESHOLD)
1455                         newitr = ixgbe_bulk_latency;
1456         } else if (olditr == ixgbe_bulk_latency) {
1457                 if (rxr->bytes < BULK_THRESHOLD)
1458                         newitr = ixgbe_ave_latency;
1459         }
1460
1461         if (olditr != newitr) {
1462                 /* Change interrupt rate */
1463                 rxr->eitr_setting = newitr;
1464                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rxr->me),
1465                     newitr | (newitr << 16));
1466         }
1467
1468         rxr->bytes = 0;
1469         return;
1470 }
1471
1472 static void
1473 ixgbe_init_moderation(struct adapter *adapter)
1474 {
1475         struct rx_ring *rxr = adapter->rx_rings;
1476         struct tx_ring *txr = adapter->tx_rings;
1477
1478         /* Single interrupt - MSI or Legacy? */
1479         if (adapter->msix < 2) {
1480                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(0), 100);
1481                 return;
1482         }
1483
1484         /* TX irq moderation rate is fixed */
1485         for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
1486                 IXGBE_WRITE_REG(&adapter->hw,
1487                     IXGBE_EITR(txr->msix), ixgbe_ave_latency);
1488                 txr->watchdog_timer = FALSE;
1489         }
1490
1491         /* RX moderation will be adapted over time, set default */
1492         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
1493                 IXGBE_WRITE_REG(&adapter->hw,
1494                     IXGBE_EITR(rxr->msix), ixgbe_low_latency);
1495         }
1496
1497         /* Set Link moderation */
1498         IXGBE_WRITE_REG(&adapter->hw,
1499             IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
1500
1501 }
1502
1503 /*********************************************************************
1504  *
1505  *  Media Ioctl callback
1506  *
1507  *  This routine is called whenever the user queries the status of
1508  *  the interface using ifconfig.
1509  *
1510  **********************************************************************/
1511 static void
1512 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1513 {
1514         struct adapter *adapter = ifp->if_softc;
1515
1516         INIT_DEBUGOUT("ixgbe_media_status: begin");
1517         IXGBE_CORE_LOCK(adapter);
1518         ixgbe_update_link_status(adapter);
1519
1520         ifmr->ifm_status = IFM_AVALID;
1521         ifmr->ifm_active = IFM_ETHER;
1522
1523         if (!adapter->link_active) {
1524                 IXGBE_CORE_UNLOCK(adapter);
1525                 return;
1526         }
1527
1528         ifmr->ifm_status |= IFM_ACTIVE;
1529
1530         switch (adapter->link_speed) {
1531                 case IXGBE_LINK_SPEED_1GB_FULL:
1532                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1533                         break;
1534                 case IXGBE_LINK_SPEED_10GB_FULL:
1535                         ifmr->ifm_active |= adapter->optics | IFM_FDX;
1536                         break;
1537         }
1538
1539         IXGBE_CORE_UNLOCK(adapter);
1540
1541         return;
1542 }
1543
1544 /*********************************************************************
1545  *
1546  *  Media Ioctl callback
1547  *
1548  *  This routine is called when the user changes speed/duplex using
1549  *  the media/mediaopt options with ifconfig.
1550  *
1551  **********************************************************************/
1552 static int
1553 ixgbe_media_change(struct ifnet * ifp)
1554 {
1555         struct adapter *adapter = ifp->if_softc;
1556         struct ifmedia *ifm = &adapter->media;
1557
1558         INIT_DEBUGOUT("ixgbe_media_change: begin");
1559
1560         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1561                 return (EINVAL);
1562
1563         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1564         case IFM_AUTO:
1565                 adapter->hw.mac.autoneg = TRUE;
1566                 adapter->hw.phy.autoneg_advertised =
1567                     IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
1568                 break;
1569         default:
1570                 device_printf(adapter->dev, "Only auto media type\n");
1571                 return (EINVAL);
1572         }
1573
1574         return (0);
1575 }
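
/*
 * Usage note (illustrative): since only autoselect is accepted above,
 * the one invocation that succeeds from userland looks like
 *
 *      # ifconfig ix0 media autoselect
 *
 * where ix0 is a hypothetical unit name; any fixed media or mediaopt
 * selection returns EINVAL.
 */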
1576
1577 /*********************************************************************
1578  *
1579  *  This routine maps the mbufs to tx descriptors.
1580  *    WARNING: while this code uses an MQ-style infrastructure,
1581  *    it would NOT work as-is with more than 1 queue.
1582  *
1583  *  return 0 on success, positive on failure
1584  **********************************************************************/
1585
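/*
 * Background sketch for the routine below: each advanced (read-format)
 * TX descriptor it fills in carries three fields,
 *
 *      txd->read.buffer_addr    = physical address of one DMA segment
 *      txd->read.cmd_type_len   = DTYP/DCMD flags | segment length
 *      txd->read.olinfo_status  = offload bits | payload length
 *
 * and only the final descriptor of a frame has EOP|RS appended, so the
 * hardware reports one completion per frame rather than per segment.
 */
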
1586 static int
1587 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1588 {
1589         struct adapter  *adapter = txr->adapter;
1590         u32             olinfo_status = 0, cmd_type_len;
1591         u32             paylen = 0;
1592         int             i, j, error, nsegs;
1593         int             first, last = 0, offload = 0;
1594         struct mbuf     *m_head;
1595         bus_dma_segment_t segs[ixgbe_num_segs];
1596         bus_dmamap_t    map;
1597         struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
1598         union ixgbe_adv_tx_desc *txd = NULL;
1599
1600         m_head = *m_headp;
1601
1602         /* Basic descriptor defines */
1603         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1604             IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1605
1606         if (m_head->m_flags & M_VLANTAG)
1607                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1608
1609         /*
1610          * Important to capture the first descriptor
1611          * used because it will contain the index of
1612          * the one we tell the hardware to report back
1613          */
1614         first = txr->next_avail_tx_desc;
1615         txbuf = &txr->tx_buffers[first];
1616         txbuf_mapped = txbuf;
1617         map = txbuf->map;
1618
1619         /*
1620          * Map the packet for DMA.
1621          */
1622         error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1623             *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1624
1625         if (error == EFBIG) {
1626                 struct mbuf *m;
1627
1628                 m = m_defrag(*m_headp, M_DONTWAIT);
1629                 if (m == NULL) {
1630                         adapter->mbuf_defrag_failed++;
1631                         m_freem(*m_headp);
1632                         *m_headp = NULL;
1633                         return (ENOBUFS);
1634                 }
1635                 *m_headp = m;
1636
1637                 /* Try it again */
1638                 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1639                     *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1640
1641                 if (error == ENOMEM) {
1642                         adapter->no_tx_dma_setup++;
1643                         return (error);
1644                 } else if (error != 0) {
1645                         adapter->no_tx_dma_setup++;
1646                         m_freem(*m_headp);
1647                         *m_headp = NULL;
1648                         return (error);
1649                 }
1650         } else if (error == ENOMEM) {
1651                 adapter->no_tx_dma_setup++;
1652                 return (error);
1653         } else if (error != 0) {
1654                 adapter->no_tx_dma_setup++;
1655                 m_freem(*m_headp);
1656                 *m_headp = NULL;
1657                 return (error);
1658         }
1659
1660         /* Make certain there are enough descriptors */
1661         if (nsegs > txr->tx_avail - 2) {
1662                 txr->no_tx_desc_avail++;
1663                 error = ENOBUFS;
1664                 goto xmit_fail;
1665         }
1666         m_head = *m_headp;
1667
1668         /*
1669         ** Set up the appropriate offload context;
1670         ** this becomes the first descriptor of
1671         ** a packet.
1672         */
1673         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1674                 if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1675                         cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1676                         olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1677                         olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1678                         olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1679                         ++adapter->tso_tx;
1680                 } else
1681                         return (ENXIO);
1682         } else /* Offloads other than TSO */
1683                 offload = ixgbe_tx_ctx_setup(txr, m_head);
1684         if (offload == TRUE)
1685                 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1686 #ifdef IXGBE_TIMESYNC
1687         if (offload == IXGBE_TIMESTAMP)
1688                 cmd_type_len |= IXGBE_ADVTXD_TSTAMP;
1689 #endif
1690         /* Record payload length */
1691         if (paylen == 0)
1692                 olinfo_status |= m_head->m_pkthdr.len <<
1693                     IXGBE_ADVTXD_PAYLEN_SHIFT;
1694
1695         i = txr->next_avail_tx_desc;
1696         for (j = 0; j < nsegs; j++) {
1697                 bus_size_t seglen;
1698                 bus_addr_t segaddr;
1699
1700                 txbuf = &txr->tx_buffers[i];
1701                 txd = &txr->tx_base[i];
1702                 seglen = segs[j].ds_len;
1703                 segaddr = htole64(segs[j].ds_addr);
1704
1705                 txd->read.buffer_addr = segaddr;
1706                 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1707             cmd_type_len | seglen);
1708                 txd->read.olinfo_status = htole32(olinfo_status);
1709                 last = i; /* Next descriptor that will get completed */
1710
1711                 if (++i == adapter->num_tx_desc)
1712                         i = 0;
1713
1714                 txbuf->m_head = NULL;
1715                 txbuf->eop_index = -1;
1716         }
1717
1718         txd->read.cmd_type_len |=
1719             htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1720         txr->tx_avail -= nsegs;
1721         txr->next_avail_tx_desc = i;
1722
1723         txbuf->m_head = m_head;
1724         txbuf->map = map;
1725         bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1726
1727         /* Set the index of the descriptor that will be marked done */
1728         txbuf = &txr->tx_buffers[first];
1729         txbuf->eop_index = last;
1730
1731         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1732             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1733         /*
1734          * Advance the Transmit Descriptor Tail (Tdt), this tells the
1735          * hardware that this frame is available to transmit.
1736          */
1737         ++txr->total_packets;
1738         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1739         return (0);
1740
1741 xmit_fail:
1742         bus_dmamap_unload(txr->txtag, txbuf->map);
1743         return (error);
1744
1745 }
1746
1747 static void
1748 ixgbe_set_promisc(struct adapter *adapter)
1749 {
1750
1751         u_int32_t       reg_rctl;
1752         struct ifnet   *ifp = adapter->ifp;
1753
1754         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1755
1756         if (ifp->if_flags & IFF_PROMISC) {
1757                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1758                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1759         } else if (ifp->if_flags & IFF_ALLMULTI) {
1760                 reg_rctl |= IXGBE_FCTRL_MPE;
1761                 reg_rctl &= ~IXGBE_FCTRL_UPE;
1762                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1763         }
1764         return;
1765 }
1766
1767 static void
1768 ixgbe_disable_promisc(struct adapter * adapter)
1769 {
1770         u_int32_t       reg_rctl;
1771
1772         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1773
1774         reg_rctl &= (~IXGBE_FCTRL_UPE);
1775         reg_rctl &= (~IXGBE_FCTRL_MPE);
1776         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1777
1778         return;
1779 }
1780
1781
1782 /*********************************************************************
1783  *  Multicast Update
1784  *
1785  *  This routine is called whenever multicast address list is updated.
1786  *
1787  **********************************************************************/
1788 #define IXGBE_RAR_ENTRIES 16
1789
1790 static void
1791 ixgbe_set_multi(struct adapter *adapter)
1792 {
1793         u32     fctrl;
1794         u8      mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1795         u8      *update_ptr;
1796         struct  ifmultiaddr *ifma;
1797         int     mcnt = 0;
1798         struct ifnet   *ifp = adapter->ifp;
1799
1800         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1801
1802         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1803         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1804         if (ifp->if_flags & IFF_PROMISC)
1805                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1806         else if (ifp->if_flags & IFF_ALLMULTI) {
1807                 fctrl |= IXGBE_FCTRL_MPE;
1808                 fctrl &= ~IXGBE_FCTRL_UPE;
1809         } else
1810                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1811         
1812         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1813
1814         IF_ADDR_LOCK(ifp);
1815         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1816                 if (ifma->ifma_addr->sa_family != AF_LINK)
1817                         continue;
1818                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1819                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1820                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1821                 mcnt++;
1822         }
1823         IF_ADDR_UNLOCK(ifp);
1824
1825         update_ptr = mta;
1826         ixgbe_update_mc_addr_list(&adapter->hw,
1827             update_ptr, mcnt, ixgbe_mc_array_itr);
1828
1829         return;
1830 }
1831
1832 /*
1833  * This is an iterator function needed by the multicast
1834  * shared code. It simply feeds that routine the addresses
1835  * in the array built by ixgbe_set_multi(), one at a time.
1836  */
1837 static u8 *
1838 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1839 {
1840         u8 *addr = *update_ptr;
1841         u8 *newptr;
1842         *vmdq = 0;
1843
1844         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1845         *update_ptr = newptr;
1846         return addr;
1847 }
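
/*
 * Assumed shared-code behavior (a sketch, not the actual routine): the
 * update function consumes the flat mta[] array by calling the iterator
 * once per address, roughly
 *
 *      for (i = 0; i < mc_addr_count; i++) {
 *              addr = next(hw, &update_ptr, &vmdq);
 *              ... program addr into the multicast table ...
 *      }
 *
 * which is why the iterator only advances the pointer by one MAC length.
 */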
1848
1849
1850 /*********************************************************************
1851  *  Timer routine
1852  *
1853  *  This routine checks for link status, updates statistics,
1854  *  and runs the watchdog timer.
1855  *
1856  **********************************************************************/
1857
1858 static void
1859 ixgbe_local_timer(void *arg)
1860 {
1861         struct adapter *adapter = arg;
1862         struct ifnet   *ifp = adapter->ifp;
1863
1864         mtx_assert(&adapter->core_mtx, MA_OWNED);
1865
1866         /* Check for pluggable optics */
1867         if (adapter->sfp_probe)
1868                 if (!ixgbe_sfp_probe(adapter))
1869                         goto out; /* Nothing to do */
1870
1871         ixgbe_update_link_status(adapter);
1872         ixgbe_update_stats_counters(adapter);
1873         if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1874                 ixgbe_print_hw_stats(adapter);
1875         }
1876         /*
1877          * Each tick we check the watchdog
1878          * to protect against hardware hangs.
1879          */
1880         ixgbe_watchdog(adapter);
1881
1882 out:
1883         /* Trigger an RX interrupt on all queues */
1884         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, adapter->rx_mask);
1885
1886         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
1887 }
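
/*
 * Note on the EICS write above: setting bits in EICS software-triggers
 * the matching interrupt vectors. adapter->rx_mask collects every RX
 * ring's eims bit (see ixgbe_allocate_msix), so each one-second tick
 * also kicks the RX handlers even if the hardware has gone quiet.
 */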
1888
1889 /*
1890 ** Note: this routine updates the OS on the link state;
1891 **      the real check of the hardware only happens with
1892 **      a link interrupt.
1893 */
1894 static void
1895 ixgbe_update_link_status(struct adapter *adapter)
1896 {
1897         struct ifnet    *ifp = adapter->ifp;
1898         struct tx_ring *txr = adapter->tx_rings;
1899         device_t dev = adapter->dev;
1900
1901
1902         if (adapter->link_up) {
1903                 if (adapter->link_active == FALSE) {
1904                         if (bootverbose)
1905                                 device_printf(dev,"Link is up %d Gbps %s \n",
1906                                     ((adapter->link_speed == 128)? 10:1),
1907                                     "Full Duplex");
1908                         adapter->link_active = TRUE;
1909                         if_link_state_change(ifp, LINK_STATE_UP);
1910                 }
1911         } else { /* Link down */
1912                 if (adapter->link_active == TRUE) {
1913                         if (bootverbose)
1914                                 device_printf(dev,"Link is Down\n");
1915                         if_link_state_change(ifp, LINK_STATE_DOWN);
1916                         adapter->link_active = FALSE;
1917                         for (int i = 0; i < adapter->num_tx_queues;
1918                             i++, txr++)
1919                                 txr->watchdog_timer = FALSE;
1920                 }
1921         }
1922
1923         return;
1924 }
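
/*
 * About the magic 128 above: IXGBE_LINK_SPEED_10GB_FULL is 0x80, so
 * the ternary prints "10" Gbps for a 10G link and "1" for anything else.
 */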
1925
1926
1927 /*********************************************************************
1928  *
1929  *  This routine disables all traffic on the adapter by issuing a
1930  *  global reset on the MAC and deallocates TX/RX buffers.
1931  *
1932  **********************************************************************/
1933
1934 static void
1935 ixgbe_stop(void *arg)
1936 {
1937         struct ifnet   *ifp;
1938         struct adapter *adapter = arg;
1939         ifp = adapter->ifp;
1940
1941         mtx_assert(&adapter->core_mtx, MA_OWNED);
1942
1943         INIT_DEBUGOUT("ixgbe_stop: begin\n");
1944         ixgbe_disable_intr(adapter);
1945
1946         /* Tell the stack that the interface is no longer active */
1947         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1948
1949 #ifdef IXGBE_TIMESYNC
1950         /* Disable IEEE 1588 support */
1951         if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1952                 ixgbe_tsync_disable(adapter);
1953 #endif
1954         ixgbe_reset_hw(&adapter->hw);
1955         adapter->hw.adapter_stopped = FALSE;
1956         ixgbe_stop_adapter(&adapter->hw);
1957         callout_stop(&adapter->timer);
1958
1959         /* reprogram the RAR[0] in case user changed it. */
1960         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1961
1962         return;
1963 }
1964
1965
1966 /*********************************************************************
1967  *
1968  *  Determine hardware revision.
1969  *
1970  **********************************************************************/
1971 static void
1972 ixgbe_identify_hardware(struct adapter *adapter)
1973 {
1974         device_t        dev = adapter->dev;
1975
1976         /* Save off the information about this board */
1977         adapter->hw.vendor_id = pci_get_vendor(dev);
1978         adapter->hw.device_id = pci_get_device(dev);
1979         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1980         adapter->hw.subsystem_vendor_id =
1981             pci_read_config(dev, PCIR_SUBVEND_0, 2);
1982         adapter->hw.subsystem_device_id =
1983             pci_read_config(dev, PCIR_SUBDEV_0, 2);
1984
1985         return;
1986 }
1987
1988 /*********************************************************************
1989  *
1990  *  Setup the Legacy or MSI Interrupt handler
1991  *
1992  **********************************************************************/
1993 static int
1994 ixgbe_allocate_legacy(struct adapter *adapter)
1995 {
1996         device_t dev = adapter->dev;
1997         struct          tx_ring *txr = adapter->tx_rings;
1998         struct          rx_ring *rxr = adapter->rx_rings;
1999         int error;
2000
2001         /* Legacy RID at 0 */
2002         if (adapter->msix == 0)
2003                 adapter->rid[0] = 0;
2004
2005         /* We allocate a single interrupt resource */
2006         adapter->res[0] = bus_alloc_resource_any(dev,
2007             SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2008         if (adapter->res[0] == NULL) {
2009                 device_printf(dev, "Unable to allocate bus resource: "
2010                     "interrupt\n");
2011                 return (ENXIO);
2012         }
2013
2014         /*
2015          * Try allocating a fast interrupt and the associated deferred
2016          * processing contexts.
2017          */
2018         TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2019         TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2020         txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2021             taskqueue_thread_enqueue, &txr->tq);
2022         rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2023             taskqueue_thread_enqueue, &rxr->tq);
2024         taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2025             device_get_nameunit(adapter->dev));
2026         taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2027             device_get_nameunit(adapter->dev));
2028
2029         if ((error = bus_setup_intr(dev, adapter->res[0],
2030 #if __FreeBSD_version >= 700000
2031             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
2032 #else
2033             INTR_TYPE_NET | INTR_MPSAFE, ixgbe_legacy_irq,
2034 #endif
2035             adapter, &adapter->tag[0])) != 0) {
2036                 device_printf(dev, "Failed to register fast interrupt "
2037                     "handler: %d\n", error);
2038                 taskqueue_free(txr->tq);
2039                 taskqueue_free(rxr->tq);
2040                 txr->tq = NULL;
2041                 rxr->tq = NULL;
2042                 return (error);
2043         }
2044
2045         return (0);
2046 }
2047
2048
2049 #if __FreeBSD_version >= 602105
2050 /*********************************************************************
2051  *
2052  *  Setup MSIX Interrupt resources and handlers 
2053  *
2054  **********************************************************************/
2055 static int
2056 ixgbe_allocate_msix(struct adapter *adapter)
2057 {
2058         device_t        dev = adapter->dev;
2059         struct          tx_ring *txr = adapter->tx_rings;
2060         struct          rx_ring *rxr = adapter->rx_rings;
2061         int             error, vector = 0;
2062
2063         /* TX setup: the code here supports multiple TX queues,
2064            but other parts of the driver are not ready for that yet */
2065         for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
2066                 adapter->res[vector] = bus_alloc_resource_any(dev,
2067                     SYS_RES_IRQ, &adapter->rid[vector],
2068                     RF_SHAREABLE | RF_ACTIVE);
2069                 if (!adapter->res[vector]) {
2070                         device_printf(dev,"Unable to allocate"
2071                             " bus resource: tx interrupt [%d]\n", vector);
2072                         return (ENXIO);
2073                 }
2074                 /* Set the handler function */
2075                 error = bus_setup_intr(dev, adapter->res[vector],
2076                     INTR_TYPE_NET | INTR_MPSAFE,
2077 #if __FreeBSD_version > 700000
2078                     NULL,
2079 #endif
2080                     ixgbe_msix_tx, txr, &adapter->tag[vector]);
2081                 if (error) {
2082                         adapter->res[vector] = NULL;
2083                         device_printf(dev, "Failed to register TX handler");
2084                         return (error);
2085                 }
2086                 txr->msix = vector;
2087                 txr->eims = 1 << vector;
2088                 TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr);
2089                 txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT,
2090                     taskqueue_thread_enqueue, &txr->tq);
2091                 taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq",
2092                     device_get_nameunit(adapter->dev));
2093         }
2094
2095         /* RX setup */
2096         for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
2097                 adapter->res[vector] = bus_alloc_resource_any(dev,
2098                     SYS_RES_IRQ, &adapter->rid[vector],
2099                     RF_SHAREABLE | RF_ACTIVE);
2100                 if (!adapter->res[vector]) {
2101                         device_printf(dev,"Unable to allocate"
2102                             " bus resource: rx interrupt [%d], "
2103                             "rid = %d\n", i, adapter->rid[vector]);
2104                         return (ENXIO);
2105                 }
2106                 /* Set the handler function */
2107                 error = bus_setup_intr(dev, adapter->res[vector],
2108                     INTR_TYPE_NET | INTR_MPSAFE,
2109 #if __FreeBSD_version > 700000
2110                     NULL,
2111 #endif
2112                     ixgbe_msix_rx, rxr, &adapter->tag[vector]);
2113                 if (error) {
2114                         adapter->res[vector] = NULL;
2115                         device_printf(dev, "Failed to register RX handler");
2116                         return (error);
2117                 }
2118                 rxr->msix = vector;
2119                 rxr->eims = 1 << vector;
2120                 /* used in local timer */
2121                 adapter->rx_mask |= rxr->eims;
2122                 TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr);
2123                 rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT,
2124                     taskqueue_thread_enqueue, &rxr->tq);
2125                 taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq",
2126                     device_get_nameunit(adapter->dev));
2127         }
2128
2129         /* Now for Link changes */
2130         adapter->res[vector] = bus_alloc_resource_any(dev,
2131             SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
2132         if (!adapter->res[vector]) {
2133                 device_printf(dev,"Unable to allocate"
2134                     " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);
2135                 return (ENXIO);
2136         }
2137         /* Set the link handler function */
2138         error = bus_setup_intr(dev, adapter->res[vector],
2139             INTR_TYPE_NET | INTR_MPSAFE,
2140 #if __FreeBSD_version > 700000
2141                     NULL,
2142 #endif
2143             ixgbe_msix_link, adapter, &adapter->tag[vector]);
2144         if (error) {
2145                 adapter->res[vector] = NULL;
2146                 device_printf(dev, "Failed to register LINK handler");
2147                 return (error);
2148         }
2149         adapter->linkvec = vector;
2150         /* Tasklets for Link, SFP and Multispeed Fiber */
2151         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2152         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2153         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2154         adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
2155             taskqueue_thread_enqueue, &adapter->tq);
2156         taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
2157             device_get_nameunit(adapter->dev));
2158
2159         return (0);
2160 }
2161 #else  /* FreeBSD 6.1/6.2 */
2162 static int
2163 ixgbe_allocate_msix(struct adapter *adapter)
2164 {
2165         return (1);
2166 }
2167 #endif
2168
2169 #if __FreeBSD_version >= 602105
2170 /*
2171  * Set up either MSI/X or MSI
2172  */
2173 static int
2174 ixgbe_setup_msix(struct adapter *adapter)
2175 {
2176         device_t dev = adapter->dev;
2177         int rid, want, queues, msgs;
2178
2179         /* Override by tuneable */
2180         if (ixgbe_enable_msix == 0)
2181                 goto msi;
2182
2183         /* First try MSI/X */
2184         rid = PCIR_BAR(MSIX_82598_BAR);
2185         adapter->msix_mem = bus_alloc_resource_any(dev,
2186             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2187         if (!adapter->msix_mem) {
2188                 rid += 4;       /* 82599 maps in higher BAR */
2189                 adapter->msix_mem = bus_alloc_resource_any(dev,
2190                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
2191         }
2192         if (!adapter->msix_mem) {
2193                 /* May not be enabled */
2194                 device_printf(adapter->dev,
2195                     "Unable to map MSIX table\n");
2196                 goto msi;
2197         }
2198
2199         msgs = pci_msix_count(dev); 
2200         if (msgs == 0) { /* system has msix disabled */
2201                 bus_release_resource(dev, SYS_RES_MEMORY,
2202                     rid, adapter->msix_mem);
2203                 adapter->msix_mem = NULL;
2204                 goto msi;
2205         }
2206
2207         /* Figure out a reasonable auto config value */
2208         queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus;
2209
2210         if (ixgbe_tx_queues == 0)
2211                 ixgbe_tx_queues = queues;
2212         if (ixgbe_rx_queues == 0)
2213                 ixgbe_rx_queues = queues;
2214         want = ixgbe_tx_queues + ixgbe_rx_queues + 1;
2215         if (msgs >= want)
2216                 msgs = want;
2217         else {
2218                 device_printf(adapter->dev,
2219                     "MSIX Configuration Problem, "
2220                     "%d vectors but %d queues wanted!\n",
2221                     msgs, want);
2222                 return (ENXIO);
2223         }
2224         if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2225                 device_printf(adapter->dev,
2226                     "Using MSIX interrupts with %d vectors\n", msgs);
2227                 adapter->num_tx_queues = ixgbe_tx_queues;
2228                 adapter->num_rx_queues = ixgbe_rx_queues;
2229                 return (msgs);
2230         }
2231 msi:
2232         msgs = pci_msi_count(dev);
2233         if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2234                 device_printf(adapter->dev,"Using MSI interrupt\n");
2235         return (msgs);
2236 }
2237 #endif /* FreeBSD_version >= 602105 */
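
/*
 * Worked example with hypothetical numbers: if the device advertises
 * msgs = 18 vectors on an mp_ncpus = 4 machine, then (msgs - 1) / 2 = 8
 * and queues = 4. With both queue tunables left at 0 this yields
 * want = 4 + 4 + 1 = 9 (TX + RX + one link vector), so only 9 of the
 * 18 available vectors are actually allocated via pci_alloc_msix().
 */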
2238
2239 static int
2240 ixgbe_allocate_pci_resources(struct adapter *adapter)
2241 {
2242         int             rid;
2243         device_t        dev = adapter->dev;
2244
2245         rid = PCIR_BAR(0);
2246         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2247             &rid, RF_ACTIVE);
2248
2249         if (!(adapter->pci_mem)) {
2250                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2251                 return (ENXIO);
2252         }
2253
2254         adapter->osdep.mem_bus_space_tag =
2255                 rman_get_bustag(adapter->pci_mem);
2256         adapter->osdep.mem_bus_space_handle =
2257                 rman_get_bushandle(adapter->pci_mem);
2258         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2259
2260         /*
2261          * Init the resource arrays
2262          */
2263         for (int i = 0; i < IXGBE_MSGS; i++) {
2264                 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */   
2265                 adapter->tag[i] = NULL;
2266                 adapter->res[i] = NULL;
2267         }
2268
2269         /* Legacy defaults */
2270         adapter->num_tx_queues = 1;
2271         adapter->num_rx_queues = 1;
2272
2273 #if __FreeBSD_version >= 602105
2274         /* Now setup MSI or MSI/X */
2275         adapter->msix = ixgbe_setup_msix(adapter);
2276 #endif
2277         adapter->hw.back = &adapter->osdep;
2278         return (0);
2279 }
2280
2281 static void
2282 ixgbe_free_pci_resources(struct adapter * adapter)
2283 {
2284         device_t        dev = adapter->dev;
2285         int             rid;
2286
2287         /*
2288          * Legacy has msix set to 0, but the loop below
2289          * must still run once, so bump it to 1.
2290          */
2291         if (adapter->msix == 0)
2292                 adapter->msix = 1;
2293
2294         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2295                 rid = PCIR_BAR(MSIX_82598_BAR);
2296         else
2297                 rid = PCIR_BAR(MSIX_82599_BAR);
2298
2299         /*
2300          * First release all the interrupt resources:
2301          *      notice that since these are just kept
2302          *      in an array, we can use the same logic
2303          *      whether it's MSIX or just legacy.
2304          */
2305         for (int i = 0; i < adapter->msix; i++) {
2306                 if (adapter->tag[i] != NULL) {
2307                         bus_teardown_intr(dev, adapter->res[i],
2308                             adapter->tag[i]);
2309                         adapter->tag[i] = NULL;
2310                 }
2311                 if (adapter->res[i] != NULL) {
2312                         bus_release_resource(dev, SYS_RES_IRQ,
2313                             adapter->rid[i], adapter->res[i]);
2314                 }
2315         }
2316
2317 #if __FreeBSD_version >= 602105
2318         if (adapter->msix)
2319                 pci_release_msi(dev);
2320
2321         if (adapter->msix_mem != NULL)
2322                 bus_release_resource(dev, SYS_RES_MEMORY,
2323                     rid, adapter->msix_mem);
2324 #endif
2325
2326         if (adapter->pci_mem != NULL)
2327                 bus_release_resource(dev, SYS_RES_MEMORY,
2328                     PCIR_BAR(0), adapter->pci_mem);
2329
2330         return;
2331 }
2332
2333 /*********************************************************************
2334  *
2335  *  Initialize the hardware to a configuration as specified by the
2336  *  adapter structure. The controller is reset, the EEPROM is
2337  *  verified, the MAC address is set, then the shared initialization
2338  *  routines are called.
2339  *
2340  **********************************************************************/
2341 static int
2342 ixgbe_hardware_init(struct adapter *adapter)
2343 {
2344         device_t dev = adapter->dev;
2345         u16 csum;
2346
2347         csum = 0;
2348         /* Issue a global reset */
2349         adapter->hw.adapter_stopped = FALSE;
2350         ixgbe_stop_adapter(&adapter->hw);
2351
2352         /* Make sure we have a good EEPROM before we read from it */
2353         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
2354                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
2355                 return (EIO);
2356         }
2357
2358         /* Get Hardware Flow Control setting */
2359         adapter->hw.fc.requested_mode = ixgbe_fc_full;
2360         adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
2361         adapter->hw.fc.low_water = IXGBE_FC_LO;
2362         adapter->hw.fc.high_water = IXGBE_FC_HI;
2363         adapter->hw.fc.send_xon = TRUE;
2364
2365         if (ixgbe_init_hw(&adapter->hw)) {
2366                 device_printf(dev,"Hardware Initialization Failed");
2367                 return (EIO);
2368         }
2369
2370         return (0);
2371 }
2372
2373 /*********************************************************************
2374  *
2375  *  Setup networking device structure and register an interface.
2376  *
2377  **********************************************************************/
2378 static void
2379 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2380 {
2381         struct ifnet   *ifp;
2382         struct ixgbe_hw *hw = &adapter->hw;
2383         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2384
2385         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2386         if (ifp == NULL)
2387                 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
2388         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2389         ifp->if_mtu = ETHERMTU;
2390         ifp->if_baudrate = 1000000000;
2391         ifp->if_init = ixgbe_init;
2392         ifp->if_softc = adapter;
2393         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2394         ifp->if_ioctl = ixgbe_ioctl;
2395         ifp->if_start = ixgbe_start;
2396         ifp->if_timer = 0;
2397         ifp->if_watchdog = NULL;
2398         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
2399
2400         ether_ifattach(ifp, adapter->hw.mac.addr);
2401
2402         adapter->max_frame_size =
2403             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
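
        /*
         * With the default ETHERMTU of 1500 this works out to
         * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518 bytes.
         */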
2404
2405         /*
2406          * Tell the upper layer(s) we support long frames.
2407          */
2408         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2409
2410         ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
2411         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2412         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2413
2414         ifp->if_capenable = ifp->if_capabilities;
2415
2416         if (hw->device_id == IXGBE_DEV_ID_82598AT)
2417                 ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL |
2418                     IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE);
2419         else
2420                 ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL,
2421                     TRUE, FALSE);
2422
2423         /*
2424          * Specify the media types supported by this adapter and register
2425          * callbacks to update media and link information
2426          */
2427         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2428                      ixgbe_media_status);
2429         ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics |
2430             IFM_FDX, 0, NULL);
2431         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2432                 ifmedia_add(&adapter->media,
2433                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2434                 ifmedia_add(&adapter->media,
2435                     IFM_ETHER | IFM_1000_T, 0, NULL);
2436         }
2437         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2438         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2439
2440         return;
2441 }
2442
2443 /********************************************************************
2444  * Manage DMA'able memory.
2445  *******************************************************************/
2446 static void
2447 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2448 {
2449         if (error)
2450                 return;
2451         *(bus_addr_t *) arg = segs->ds_addr;
2452         return;
2453 }
2454
2455 static int
2456 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2457                 struct ixgbe_dma_alloc *dma, int mapflags)
2458 {
2459         device_t dev = adapter->dev;
2460         int             r;
2461
2462         r = bus_dma_tag_create(NULL,    /* parent */
2463                                1, 0,    /* alignment, bounds */
2464                                BUS_SPACE_MAXADDR,       /* lowaddr */
2465                                BUS_SPACE_MAXADDR,       /* highaddr */
2466                                NULL, NULL,      /* filter, filterarg */
2467                                size,    /* maxsize */
2468                                1,       /* nsegments */
2469                                size,    /* maxsegsize */
2470                                BUS_DMA_ALLOCNOW,        /* flags */
2471                                NULL,    /* lockfunc */
2472                                NULL,    /* lockfuncarg */
2473                                &dma->dma_tag);
2474         if (r != 0) {
2475                 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2476                        "error %u\n", r);
2477                 goto fail_0;
2478         }
2479         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2480                              BUS_DMA_NOWAIT, &dma->dma_map);
2481         if (r != 0) {
2482                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2483                        "error %u\n", r);
2484                 goto fail_1;
2485         }
2486         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2487                             size,
2488                             ixgbe_dmamap_cb,
2489                             &dma->dma_paddr,
2490                             mapflags | BUS_DMA_NOWAIT);
2491         if (r != 0) {
2492                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2493                        "error %u\n", r);
2494                 goto fail_2;
2495         }
2496         dma->dma_size = size;
2497         return (0);
2498 fail_2:
2499         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2500 fail_1:
2501         bus_dma_tag_destroy(dma->dma_tag);
2502 fail_0:
2503         dma->dma_map = NULL;
2504         dma->dma_tag = NULL;
2505         return (r);
2506 }
2507
2508 static void
2509 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2510 {
2511         bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2512             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2513         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2514         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2515         bus_dma_tag_destroy(dma->dma_tag);
2516 }
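
/*
 * The two helpers above are mirror images: ixgbe_dma_malloc() runs the
 * usual tag-create / mem-alloc / map-load sequence (with the fail_*
 * labels unwinding in reverse order), and ixgbe_dma_free() undoes it
 * with sync / unload / free / tag-destroy.
 */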
2517
2518
2519 /*********************************************************************
2520  *
2521  *  Allocate memory for the transmit and receive rings, and then
2522  *  the descriptors associated with each, called only once at attach.
2523  *
2524  **********************************************************************/
2525 static int
2526 ixgbe_allocate_queues(struct adapter *adapter)
2527 {
2528         device_t dev = adapter->dev;
2529         struct tx_ring *txr;
2530         struct rx_ring *rxr;
2531         int rsize, tsize, error = IXGBE_SUCCESS;
2532         int txconf = 0, rxconf = 0;
2533
2534         /* First allocate the TX ring struct memory */
2535         if (!(adapter->tx_rings =
2536             (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2537             adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2538                 device_printf(dev, "Unable to allocate TX ring memory\n");
2539                 error = ENOMEM;
2540                 goto fail;
2541         }
2542         txr = adapter->tx_rings;
2543
2544         /* Next allocate the RX */
2545         if (!(adapter->rx_rings =
2546             (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2547             adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2548                 device_printf(dev, "Unable to allocate RX ring memory\n");
2549                 error = ENOMEM;
2550                 goto rx_fail;
2551         }
2552         rxr = adapter->rx_rings;
2553
2554         /* For the ring itself */
2555         tsize = roundup2(adapter->num_tx_desc *
2556             sizeof(union ixgbe_adv_tx_desc), 4096);
2557
2558         /*
2559          * Now set up the TX queues; txconf is needed to handle the
2560          * possibility that things fail midcourse, in which case we
2561          * need to undo the memory allocations gracefully
2562          */
2563         for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
2564                 /* Set up some basics */
2565                 txr = &adapter->tx_rings[i];
2566                 txr->adapter = adapter;
2567                 txr->me = i;
2568
2569                 /* Initialize the TX side lock */
2570                 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2571                     device_get_nameunit(dev), txr->me);
2572                 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2573
2574                 if (ixgbe_dma_malloc(adapter, tsize,
2575                         &txr->txdma, BUS_DMA_NOWAIT)) {
2576                         device_printf(dev,
2577                             "Unable to allocate TX Descriptor memory\n");
2578                         error = ENOMEM;
2579                         goto err_tx_desc;
2580                 }
2581                 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2582                 bzero((void *)txr->tx_base, tsize);
2583
2584                 /* Now allocate transmit buffers for the ring */
2585                 if (ixgbe_allocate_transmit_buffers(txr)) {
2586                         device_printf(dev,
2587                             "Critical Failure setting up transmit buffers\n");
2588                         error = ENOMEM;
2589                         goto err_tx_desc;
2590                 }
2591
2592         }
2593
2594         /*
2595          * Next the RX queues...
2596          */ 
2597         rsize = roundup2(adapter->num_rx_desc *
2598             sizeof(union ixgbe_adv_rx_desc), 4096);
2599         for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
2600                 rxr = &adapter->rx_rings[i];
2601                 /* Set up some basics */
2602                 rxr->adapter = adapter;
2603                 rxr->me = i;
2604
2605                 /* Initialize the RX side lock */
2606                 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2607                     device_get_nameunit(dev), rxr->me);
2608                 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2609
2610                 if (ixgbe_dma_malloc(adapter, rsize,
2611                         &rxr->rxdma, BUS_DMA_NOWAIT)) {
2612                         device_printf(dev,
2613                             "Unable to allocate RX Descriptor memory\n");
2614                         error = ENOMEM;
2615                         goto err_rx_desc;
2616                 }
2617                 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2618                 bzero((void *)rxr->rx_base, rsize);
2619
2620                 /* Allocate receive buffers for the ring */
2621                 if (ixgbe_allocate_receive_buffers(rxr)) {
2622                         device_printf(dev,
2623                             "Critical Failure setting up receive buffers\n");
2624                         error = ENOMEM;
2625                         goto err_rx_desc;
2626                 }
2627         }
2628
2629         return (0);
2630
2631 err_rx_desc:
2632         for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2633                 ixgbe_dma_free(adapter, &rxr->rxdma);
2634 err_tx_desc:
2635         for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2636                 ixgbe_dma_free(adapter, &txr->txdma);
2637         free(adapter->rx_rings, M_DEVBUF);
2638 rx_fail:
2639         free(adapter->tx_rings, M_DEVBUF);
2640 fail:
2641         return (error);
2642 }
2643
2644 /*********************************************************************
2645  *
2646  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2647  *  the information needed to transmit a packet on the wire. This is
2648  *  called only once at attach; setup is done on every reset.
2649  *
2650  **********************************************************************/
2651 static int
2652 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2653 {
2654         struct adapter *adapter = txr->adapter;
2655         device_t dev = adapter->dev;
2656         struct ixgbe_tx_buf *txbuf;
2657         int error, i;
2658
2659         /*
2660          * Setup DMA descriptor areas.
2661          */
2662         if ((error = bus_dma_tag_create(NULL,           /* parent */
2663                                1, 0,            /* alignment, bounds */
2664                                BUS_SPACE_MAXADDR,       /* lowaddr */
2665                                BUS_SPACE_MAXADDR,       /* highaddr */
2666                                NULL, NULL,              /* filter, filterarg */
2667                                IXGBE_TSO_SIZE,          /* maxsize */
2668                                ixgbe_num_segs,          /* nsegments */
2669                                PAGE_SIZE,               /* maxsegsize */
2670                                0,                       /* flags */
2671                                NULL,                    /* lockfunc */
2672                                NULL,                    /* lockfuncarg */
2673                                &txr->txtag))) {
2674                 device_printf(dev,"Unable to allocate TX DMA tag\n");
2675                 goto fail;
2676         }
2677
2678         if (!(txr->tx_buffers =
2679             (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2680             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2681                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2682                 error = ENOMEM;
2683                 goto fail;
2684         }
2685
2686         /* Create the descriptor buffer dma maps */
2687         txbuf = txr->tx_buffers;
2688         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2689                 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2690                 if (error != 0) {
2691                         device_printf(dev, "Unable to create TX DMA map\n");
2692                         goto fail;
2693                 }
2694         }
2695
2696         return 0;
2697 fail:
2698         /* We free all, it handles case where we are in the middle */
2699         ixgbe_free_transmit_structures(adapter);
2700         return (error);
2701 }
2702
2703 /*********************************************************************
2704  *
2705  *  Initialize a transmit ring.
2706  *
2707  **********************************************************************/
2708 static void
2709 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2710 {
2711         struct adapter *adapter = txr->adapter;
2712         struct ixgbe_tx_buf *txbuf;
2713         int i;
2714
2715         /* Clear the old ring contents */
2716         bzero((void *)txr->tx_base,
2717               (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2718         /* Reset indices */
2719         txr->next_avail_tx_desc = 0;
2720         txr->next_tx_to_clean = 0;
2721
2722         /* Free any existing tx buffers. */
2723         txbuf = txr->tx_buffers;
2724         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2725                 if (txbuf->m_head != NULL) {
2726                         bus_dmamap_sync(txr->txtag, txbuf->map,
2727                             BUS_DMASYNC_POSTWRITE);
2728                         bus_dmamap_unload(txr->txtag, txbuf->map);
2729                         m_freem(txbuf->m_head);
2730                         txbuf->m_head = NULL;
2731                 }
2732                 /* Clear the EOP index */
2733                 txbuf->eop_index = -1;
2734         }
2735
2736         /* Set number of descriptors available */
2737         txr->tx_avail = adapter->num_tx_desc;
2738
2739         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2740             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2741 }
2742
2743 /*********************************************************************
2744  *
2745  *  Initialize all transmit rings.
2746  *
2747  **********************************************************************/
2748 static int
2749 ixgbe_setup_transmit_structures(struct adapter *adapter)
2750 {
2751         struct tx_ring *txr = adapter->tx_rings;
2752
2753         for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
2754                 ixgbe_setup_transmit_ring(txr);
2755
2756         return (0);
2757 }
2758
2759 /*********************************************************************
2760  *
2761  *  Enable transmit unit.
2762  *
2763  **********************************************************************/
2764 static void
2765 ixgbe_initialize_transmit_units(struct adapter *adapter)
2766 {
2767         struct tx_ring  *txr = adapter->tx_rings;
2768         struct ixgbe_hw *hw = &adapter->hw;
2769
2770         /* Setup the Base and Length of the Tx Descriptor Ring */
2771
2772         for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2773                 u64     tdba = txr->txdma.dma_paddr;
2774
2775                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2776                        (tdba & 0x00000000ffffffffULL));
2777                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2778                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2779                     adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2780
2781                 /* Setup the HW Tx Head and Tail descriptor pointers */
2782                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2783                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2784
2785                 /* Setup Transmit Descriptor Cmd Settings */
2786                 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2787
2788                 txr->watchdog_timer = 0;
2789         }
2790
2791         if (hw->mac.type == ixgbe_mac_82599EB) {
2792                 u32 dmatxctl;
2793                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2794                 dmatxctl |= IXGBE_DMATXCTL_TE;
2795                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2796         }
2797
2798         return;
2799 }
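
/*
 * Note: TDLEN above is programmed using sizeof(struct
 * ixgbe_legacy_tx_desc); legacy and advanced descriptors are both 16
 * bytes, so the ring length comes out the same as it would with the
 * union ixgbe_adv_tx_desc used everywhere else.
 */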
2800
2801 /*********************************************************************
2802  *
2803  *  Free all transmit rings.
2804  *
2805  **********************************************************************/
2806 static void
2807 ixgbe_free_transmit_structures(struct adapter *adapter)
2808 {
2809         struct tx_ring *txr = adapter->tx_rings;
2810
2811         for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2812                 IXGBE_TX_LOCK(txr);
2813                 ixgbe_free_transmit_buffers(txr);
2814                 ixgbe_dma_free(adapter, &txr->txdma);
2815                 IXGBE_TX_UNLOCK(txr);
2816                 IXGBE_TX_LOCK_DESTROY(txr);
2817         }
2818         free(adapter->tx_rings, M_DEVBUF);
2819 }
2820
2821 /*********************************************************************
2822  *
2823  *  Free transmit ring related data structures.
2824  *
2825  **********************************************************************/
2826 static void
2827 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2828 {
2829         struct adapter *adapter = txr->adapter;
2830         struct ixgbe_tx_buf *tx_buffer;
2831         int             i;
2832
2833         INIT_DEBUGOUT("free_transmit_ring: begin");
2834
2835         if (txr->tx_buffers == NULL)
2836                 return;
2837
2838         tx_buffer = txr->tx_buffers;
2839         for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2840                 if (tx_buffer->m_head != NULL) {
2841                         bus_dmamap_sync(txr->txtag, tx_buffer->map,
2842                             BUS_DMASYNC_POSTWRITE);
2843                         bus_dmamap_unload(txr->txtag,
2844                             tx_buffer->map);
2845                         m_freem(tx_buffer->m_head);
2846                         tx_buffer->m_head = NULL;
2847                         if (tx_buffer->map != NULL) {
2848                                 bus_dmamap_destroy(txr->txtag,
2849                                     tx_buffer->map);
2850                                 tx_buffer->map = NULL;
2851                         }
2852                 } else if (tx_buffer->map != NULL) {
2853                         bus_dmamap_unload(txr->txtag,
2854                             tx_buffer->map);
2855                         bus_dmamap_destroy(txr->txtag,
2856                             tx_buffer->map);
2857                         tx_buffer->map = NULL;
2858                 }
2859         }
2860
2861         if (txr->tx_buffers != NULL) {
2862                 free(txr->tx_buffers, M_DEVBUF);
2863                 txr->tx_buffers = NULL;
2864         }
2865         if (txr->txtag != NULL) {
2866                 bus_dma_tag_destroy(txr->txtag);
2867                 txr->txtag = NULL;
2868         }
2869         return;
2870 }
2871
2872 /*********************************************************************
2873  *
2874  *  Advanced Context Descriptor setup for VLAN or CSUM
2875  *
2876  **********************************************************************/
2877
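/*
** A context descriptor occupies one slot in the TX ring; it carries
** the header lengths and checksum/VLAN options that the hardware
** applies to the data descriptors that follow it.
*/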
2878 static int
2879 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2880 {
2881         struct adapter *adapter = txr->adapter;
2882         struct ixgbe_adv_tx_context_desc *TXD;
2883         struct ixgbe_tx_buf        *tx_buffer;
2884         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2885         struct ether_vlan_header *eh;
2886         struct ip *ip = NULL;
2887         struct ip6_hdr *ip6;
2888         int  ehdrlen, ip_hlen = 0;
2889         u16     etype;
2890         u8      ipproto = 0;
2891         bool    offload = FALSE;
2892         int ctxd = txr->next_avail_tx_desc;
2893 #if __FreeBSD_version < 700000
2894         struct m_tag    *mtag;
2895 #else
2896         u16 vtag = 0;
2897 #endif
2898
2899
2900         if (mp->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2901                 offload = TRUE;
2902
2903         tx_buffer = &txr->tx_buffers[ctxd];
2904         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2905
2906         /*
2907         ** In advanced descriptors the vlan tag must 
2908         ** be placed into the descriptor itself.
2909         */
2910 #if __FreeBSD_version < 700000
2911         mtag = VLAN_OUTPUT_TAG(adapter->ifp, mp);
2912         if (mtag != NULL) {
2913                 vlan_macip_lens |=
2914                     htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT;
2915                 offload = TRUE;
2916         } 
2917 #else
2918         if (mp->m_flags & M_VLANTAG) {
2919                 vtag = htole16(mp->m_pkthdr.ether_vtag);
2920                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2921                 offload = TRUE;
2922         } 
2923 #endif
2924         /*
2925          * Determine where frame payload starts.
2926          * Jump over vlan headers if already present,
2927          * helpful for QinQ too.
2928          */
2929         eh = mtod(mp, struct ether_vlan_header *);
2930         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2931                 etype = ntohs(eh->evl_proto);
2932                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2933         } else {
2934                 etype = ntohs(eh->evl_encap_proto);
2935                 ehdrlen = ETHER_HDR_LEN;
2936         }
2937
2938         /* Set the ether header length */
2939         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2940
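        /*
        ** vlan_macip_lens packs three fields into one 32-bit word:
        ** the VLAN tag (set above), the MAC header length, and the
        ** IP header length (OR'd in after the switch below).
        */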
2941         switch (etype) {
2942                 case ETHERTYPE_IP:
2943                         ip = (struct ip *)(mp->m_data + ehdrlen);
2944                         ip_hlen = ip->ip_hl << 2;
2945                         if (mp->m_len < ehdrlen + ip_hlen)
2946                                 return FALSE; /* failure */
2947                         ipproto = ip->ip_p;
2948                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2949                         break;
2950                 case ETHERTYPE_IPV6:
2951                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2952                         ip_hlen = sizeof(struct ip6_hdr);
2953                         if (mp->m_len < ehdrlen + ip_hlen)
2954                                 return FALSE; /* failure */
2955                         ipproto = ip6->ip6_nxt;
2956                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2957                         break;
2958 #ifdef IXGBE_TIMESYNC
2959                 case ETHERTYPE_IEEE1588:
2960                         return (IXGBE_TIMESTAMP);
2961 #endif
2962                 default:
2963                         return (FALSE);
2964         }
2965
2966         vlan_macip_lens |= ip_hlen;
2967         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2968
2969         switch (ipproto) {
2970                 case IPPROTO_TCP:
2971                         if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2972                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2973                                 offload = TRUE;
2974                         }
2975                         break;
2976
2977                 case IPPROTO_UDP:
2978                 {
2979 #ifdef IXGBE_TIMESYNC
2980                         void *hdr = (caddr_t) ip + ip_hlen;
2981                         struct udphdr *uh = (struct udphdr *)hdr;
2982
2983                         if (uh->uh_dport == htons(TSYNC_UDP_PORT))
2984                                 return (IXGBE_TIMESTAMP);
2985 #endif
2986                         if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2987                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2988                                 offload = TRUE;
2989                         }
2990                         break;
2991
2992                 }
2993
2994                 default:
2995                         return (FALSE);
2996         }
2997
2998         if (offload != TRUE)
2999                 return (FALSE);
3000
3001         /* Now copy bits into descriptor */
3002         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3003         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3004         TXD->seqnum_seed = htole32(0);
3005         TXD->mss_l4len_idx = htole32(0);
3006
3007         tx_buffer->m_head = NULL;
3008         tx_buffer->eop_index = -1;
3009
3010         /* We've consumed the first desc, adjust counters */
3011         if (++ctxd == adapter->num_tx_desc)
3012                 ctxd = 0;
3013         txr->next_avail_tx_desc = ctxd;
3014         --txr->tx_avail;
3015
3016         return (TRUE);
3017 }
3018
3019 #if __FreeBSD_version >= 700000
3020 /**********************************************************************
3021  *
3022  *  Setup work for hardware segmentation offload (TSO) on
3023  *  adapters using advanced tx descriptors
3024  *
3025  **********************************************************************/
3026 static boolean_t
3027 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3028 {
3029         struct adapter *adapter = txr->adapter;
3030         struct ixgbe_adv_tx_context_desc *TXD;
3031         struct ixgbe_tx_buf        *tx_buffer;
3032         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3033         u32 mss_l4len_idx = 0;
3034         u16 vtag = 0;
3035         int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
3036         struct ether_vlan_header *eh;
3037         struct ip *ip;
3038         struct tcphdr *th;
3039
3040
3041         /*
3042          * Determine where frame payload starts.
3043          * Jump over vlan headers if already present
3044          */
3045         eh = mtod(mp, struct ether_vlan_header *);
3046         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 
3047                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3048         else
3049                 ehdrlen = ETHER_HDR_LEN;
3050
3051         /* Ensure we have at least the IP+TCP header in the first mbuf. */
3052         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3053                 return FALSE;
3054
3055         ctxd = txr->next_avail_tx_desc;
3056         tx_buffer = &txr->tx_buffers[ctxd];
3057         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3058
3059         ip = (struct ip *)(mp->m_data + ehdrlen);
3060         if (ip->ip_p != IPPROTO_TCP)
3061                 return FALSE;   /* 0 */
3062         ip->ip_sum = 0;
3063         ip_hlen = ip->ip_hl << 2;
3064         th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3065         th->th_sum = in_pseudo(ip->ip_src.s_addr,
3066             ip->ip_dst.s_addr, htons(IPPROTO_TCP));
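        /*
        ** Seed th_sum with the pseudo-header checksum (addresses and
        ** protocol only, no length, since the hardware fills in the
        ** per-segment length); ip_sum was zeroed above likewise.
        */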
3067         tcp_hlen = th->th_off << 2;
3068         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
3069
3070         /* This is used in the transmit desc in encap */
3071         *paylen = mp->m_pkthdr.len - hdrlen;
3072
3073         /* VLAN MACLEN IPLEN */
3074         if (mp->m_flags & M_VLANTAG) {
3075                 vtag = htole16(mp->m_pkthdr.ether_vtag);
3076                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3077         }
3078
3079         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3080         vlan_macip_lens |= ip_hlen;
3081         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3082
3083         /* ADV DTYPE TUCMD */
3084         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3085         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3086         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3087         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3088
3089
3090         /* MSS L4LEN IDX */
3091         mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3092         mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3093         TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3094
3095         TXD->seqnum_seed = htole32(0);
3096         tx_buffer->m_head = NULL;
3097         tx_buffer->eop_index = -1;
3098
3099         if (++ctxd == adapter->num_tx_desc)
3100                 ctxd = 0;
3101
3102         txr->tx_avail--;
3103         txr->next_avail_tx_desc = ctxd;
3104         return TRUE;
3105 }
3106
3107 #else   /* For 6.2 RELEASE */
3108 /* This makes it easy to keep the code common */
3109 static boolean_t
3110 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
3111 {
3112         return (FALSE);
3113 }
3114 #endif
3115
3116 /**********************************************************************
3117  *
3118  *  Examine each tx_buffer in the used queue. If the hardware is done
3119  *  processing the packet then free associated resources. The
3120  *  tx_buffer is put back on the free queue.
3121  *
3122  **********************************************************************/
3123 static boolean_t
3124 ixgbe_txeof(struct tx_ring *txr)
3125 {
3126         struct adapter *adapter = txr->adapter;
3127         struct ifnet    *ifp = adapter->ifp;
3128         u32     first, last, done, num_avail;
3129         u32     cleaned = 0;
3130         struct ixgbe_tx_buf *tx_buffer;
3131         struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3132
3133         mtx_assert(&txr->tx_mtx, MA_OWNED);
3134
3135         if (txr->tx_avail == adapter->num_tx_desc)
3136                 return FALSE;
3137
3138         num_avail = txr->tx_avail;
3139         first = txr->next_tx_to_clean;
3140
3141         tx_buffer = &txr->tx_buffers[first];
3142         /* For cleanup we just use legacy struct */
3143         tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3144         last = tx_buffer->eop_index;
3145         if (last == -1)
3146                 return FALSE;
3147
3148         eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3149         /*
3150         ** Get the index of the first descriptor
3151         ** BEYOND the EOP and call that 'done'.
3152         ** I do this so the comparison in the
3153         ** inner while loop below can be simple
3154         */
3155         if (++last == adapter->num_tx_desc) last = 0;
3156         done = last;
3157
3158         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3159             BUS_DMASYNC_POSTREAD);
3160         /*
3161         ** Only the EOP descriptor of a packet now has the DD
3162         ** bit set, this is what we look for...
3163         */
3164         while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3165                 /* We clean the range of the packet */
3166                 while (first != done) {
3167                         tx_desc->upper.data = 0;
3168                         tx_desc->lower.data = 0;
3169                         tx_desc->buffer_addr = 0;
3170                         num_avail++; cleaned++;
3171
3172                         if (tx_buffer->m_head) {
3173                                 ifp->if_opackets++;
3174                                 bus_dmamap_sync(txr->txtag,
3175                                     tx_buffer->map,
3176                                     BUS_DMASYNC_POSTWRITE);
3177                                 bus_dmamap_unload(txr->txtag,
3178                                     tx_buffer->map);
3179                                 m_freem(tx_buffer->m_head);
3180                                 tx_buffer->m_head = NULL;
3181                                 tx_buffer->map = NULL;
3182                         }
3183                         tx_buffer->eop_index = -1;
3184
3185                         if (++first == adapter->num_tx_desc)
3186                                 first = 0;
3187
3188                         tx_buffer = &txr->tx_buffers[first];
3189                         tx_desc =
3190                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3191                 }
3192                 /* See if there is more work now */
3193                 last = tx_buffer->eop_index;
3194                 if (last != -1) {
3195                         eop_desc =
3196                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3197                         /* Get next done point */
3198                         if (++last == adapter->num_tx_desc) last = 0;
3199                         done = last;
3200                 } else
3201                         break;
3202         }
3203         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3204             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3205
3206         txr->next_tx_to_clean = first;
3207
3208         /*
3209          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
3210          * it is OK to send packets. If there are no pending descriptors,
3211          * clear the timeout. Otherwise, if some descriptors have been freed,
3212          * restart the timeout.
3213          */
3214         if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
3215                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3216                 /* If all are clean turn off the timer */
3217                 if (num_avail == adapter->num_tx_desc) {
3218                         txr->watchdog_timer = 0;
3219                         txr->tx_avail = num_avail;
3220                         return FALSE;
3221                 }
3222         }
3223
3224         /* Some were cleaned, so reset timer */
3225         if (cleaned)
3226                 txr->watchdog_timer = IXGBE_TX_TIMEOUT;
3227         txr->tx_avail = num_avail;
3228         return TRUE;
3229 }
3230
3231 /*********************************************************************
3232  *
3233  *  Get a buffer from system mbuf buffer pool.
3234  *
3235  **********************************************************************/
3236 static int
3237 ixgbe_get_buf(struct rx_ring *rxr, int i, u8 clean)
3238 {
3239         struct adapter          *adapter = rxr->adapter;
3240         bus_dma_segment_t       seg[2];
3241         struct ixgbe_rx_buf     *rxbuf;
3242         struct mbuf             *mh, *mp;
3243         bus_dmamap_t            map;
3244         int                     nsegs, error;
3245         int                     merr = 0;
3246
3247
3248         rxbuf = &rxr->rx_buffers[i];
3249
3250         /* First get our header and payload mbuf */
3251         if (clean & IXGBE_CLEAN_HDR) {
3252                 mh = m_gethdr(M_DONTWAIT, MT_DATA);
3253                 if (mh == NULL)
3254                         goto remap;
3255         } else  /* reuse */
3256                 mh = rxr->rx_buffers[i].m_head;
3257
3258         mh->m_len = MHLEN;
3259         mh->m_flags |= M_PKTHDR;
3260
3261         if (clean & IXGBE_CLEAN_PKT) {
3262                 mp = m_getjcl(M_DONTWAIT, MT_DATA,
3263                     M_PKTHDR, adapter->rx_mbuf_sz);
3264                 if (mp == NULL)
3265                         goto remap;
3266                 mp->m_len = adapter->rx_mbuf_sz;
3267                 mp->m_flags &= ~M_PKTHDR;
3268         } else {        /* reusing */
3269                 mp = rxr->rx_buffers[i].m_pack;
3270                 mp->m_len = adapter->rx_mbuf_sz;
3271                 mp->m_flags &= ~M_PKTHDR;
3272         }
3273         /*
3274         ** Need to create a chain for the following
3275         ** dmamap call at this point.
3276         */
3277         mh->m_next = mp;
3278         mh->m_pkthdr.len = mh->m_len + mp->m_len;
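        /*
        ** The two-mbuf chain loads as two DMA segments below:
        ** seg[0] backs the header buffer and seg[1] the packet
        ** buffer of the advanced RX descriptor.
        */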
3279
3280         /* Get the memory mapping */
3281         error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3282             rxr->spare_map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3283         if (error != 0) {
3284                 printf("GET BUF: dmamap load failure - %d\n", error);
3285                 m_freem(mh);    /* frees the whole chain */
3286                 return (error);
3287         }
3288
3289         /* Unload old mapping and update buffer struct */
3290         if (rxbuf->m_head != NULL)
3291                 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3292         map = rxbuf->map;
3293         rxbuf->map = rxr->spare_map;
3294         rxr->spare_map = map;
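        /*
        ** The maps ping-pong: the just-loaded spare becomes this
        ** buffer's map, and the old map becomes the next spare.
        */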
3295         rxbuf->m_head = mh;
3296         rxbuf->m_pack = mp;
3297         bus_dmamap_sync(rxr->rxtag,
3298             rxbuf->map, BUS_DMASYNC_PREREAD);
3299
3300         /* Update descriptor */
3301         rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3302         rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3303
3304         return (0);
3305
3306         /*
3307         ** If we get here, we have an mbuf resource
3308         ** issue, so we discard the incoming packet
3309         ** and attempt to reuse existing mbufs next
3310         ** pass thru the ring, but to do so we must
3311         ** fix up the descriptor which had the address
3312         ** clobbered with writeback info.
3313         */
3314 remap:
3315         adapter->mbuf_header_failed++;
3316         merr = ENOBUFS;
3317         /* Is there a reusable buffer? */
3318         mh = rxr->rx_buffers[i].m_head;
3319         if (mh == NULL) /* Nope, init error */
3320                 return (merr);
3321         mp = rxr->rx_buffers[i].m_pack;
3322         if (mp == NULL) /* Nope, init error */
3323                 return (merr);
3324         /* Get our old mapping */
3325         rxbuf = &rxr->rx_buffers[i];
3326         error = bus_dmamap_load_mbuf_sg(rxr->rxtag,
3327             rxbuf->map, mh, seg, &nsegs, BUS_DMA_NOWAIT);
3328         if (error != 0) {
3329                 /* We really have a problem */
3330                 m_freem(mh);    /* frees the whole chain */
3331                 return (error);
3332         }
3333         /* Now fix the descriptor as needed */
3334         rxr->rx_base[i].read.hdr_addr = htole64(seg[0].ds_addr);
3335         rxr->rx_base[i].read.pkt_addr = htole64(seg[1].ds_addr);
3336
3337         return (merr);
3338 }
3339
3340
3341 /*********************************************************************
3342  *
3343  *  Allocate memory for rx_buffer structures. Since we use one
3344  *  rx_buffer per received packet, the maximum number of rx_buffer's
3345  *  that we'll need is equal to the number of receive descriptors
3346  *  that we've allocated.
3347  *
3348  **********************************************************************/
3349 static int
3350 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3351 {
3352         struct  adapter         *adapter = rxr->adapter;
3353         device_t                dev = adapter->dev;
3354         struct ixgbe_rx_buf     *rxbuf;
3355         int                     i, bsize, error;
3356
3357         bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3358         if (!(rxr->rx_buffers =
3359             (struct ixgbe_rx_buf *) malloc(bsize,
3360             M_DEVBUF, M_NOWAIT | M_ZERO))) {
3361                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3362                 error = ENOMEM;
3363                 goto fail;
3364         }
3365
3366         /*
3367         ** The tag is made to accommodate the largest buffer size
3368         ** with packet split (hence the two segments), even though
3369         ** it may not always use both.
3370         */
3371         if ((error = bus_dma_tag_create(NULL,           /* parent */
3372                                    1, 0,        /* alignment, bounds */
3373                                    BUS_SPACE_MAXADDR,   /* lowaddr */
3374                                    BUS_SPACE_MAXADDR,   /* highaddr */
3375                                    NULL, NULL,          /* filter, filterarg */
3376                                    MJUM16BYTES,         /* maxsize */
3377                                    2,                   /* nsegments */
3378                                    MJUMPAGESIZE,        /* maxsegsize */
3379                                    0,                   /* flags */
3380                                    NULL,                /* lockfunc */
3381                                    NULL,                /* lockfuncarg */
3382                                    &rxr->rxtag))) {
3383                 device_printf(dev, "Unable to create RX DMA tag\n");
3384                 goto fail;
3385         }
3386
3387         /* Create the spare map (used by getbuf) */
3388         error = bus_dmamap_create(rxr->rxtag, BUS_DMA_NOWAIT,
3389              &rxr->spare_map);
3390         if (error) {
3391                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
3392                     __func__, error);
3393                 goto fail;
3394         }
3395
3396         for (i = 0; i < adapter->num_rx_desc; i++) {
3397                 rxbuf = &rxr->rx_buffers[i];
3398                 error = bus_dmamap_create(rxr->rxtag,
3399                     BUS_DMA_NOWAIT, &rxbuf->map);
3400                 if (error) {
3401                         device_printf(dev, "Unable to create RX DMA map\n");
3402                         goto fail;
3403                 }
3404         }
3405
3406         return (0);
3407
3408 fail:
3409         /* Frees all, but can handle partial completion */
3410         ixgbe_free_receive_structures(adapter);
3411         return (error);
3412 }
3413
3414 /*********************************************************************
3415  *
3416  *  Initialize a receive ring and its buffers.
3417  *
3418  **********************************************************************/
3419 static int
3420 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3421 {
3422         struct  adapter         *adapter;
3423         device_t                dev;
3424         struct ixgbe_rx_buf     *rxbuf;
3425         struct lro_ctrl         *lro = &rxr->lro;
3426         int                     j, rsize;
3427
3428         adapter = rxr->adapter;
3429         dev = adapter->dev;
3430
3431         /* Clear the ring contents */
3432         rsize = roundup2(adapter->num_rx_desc *
3433             sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3434         bzero((void *)rxr->rx_base, rsize);
3435
3436         /*
3437         ** Free current RX buffer structs and their mbufs
3438         */
3439         for (int i = 0; i < adapter->num_rx_desc; i++) {
3440                 rxbuf = &rxr->rx_buffers[i];
3441                 if (rxbuf->m_head != NULL) {
3442                         bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3443                             BUS_DMASYNC_POSTREAD);
3444                         bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3445                         if (rxbuf->m_head) {
3446                                 rxbuf->m_head->m_next = rxbuf->m_pack;
3447                                 m_freem(rxbuf->m_head);
3448                         }
3449                         rxbuf->m_head = NULL;
3450                         rxbuf->m_pack = NULL;
3451                 }
3452         }
3453
3454         /* Now refresh the mbufs */
3455         for (j = 0; j < adapter->num_rx_desc; j++) {
3456                 if (ixgbe_get_buf(rxr, j, IXGBE_CLEAN_ALL) == ENOBUFS) {
3457                         rxr->rx_buffers[j].m_head = NULL;
3458                         rxr->rx_buffers[j].m_pack = NULL;
3459                         rxr->rx_base[j].read.hdr_addr = 0;
3460                         rxr->rx_base[j].read.pkt_addr = 0;
3461                         goto fail;
3462                 }
3463         }
3464
3465         /* Setup our descriptor indices */
3466         rxr->next_to_check = 0;
3467         rxr->last_cleaned = 0;
3468
3469         bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3470             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3471
3472         /* Now set up the LRO interface */
3473         if (ixgbe_enable_lro) {
3474                 int err = tcp_lro_init(lro);
3475                 if (err) {
3476                         INIT_DEBUGOUT("LRO Initialization failed!\n");
3477                         goto fail;
3478                 }
3479                 INIT_DEBUGOUT("RX LRO Initialized\n");
3480                 lro->ifp = adapter->ifp;
3481         }
3482
3483         return (0);
3484
3485 fail:
3486         /*
3487          * We need to clean up any buffers allocated
3488          * so far, 'j' is the failing index.
3489          */
3490         for (int i = 0; i < j; i++) {
3491                 rxbuf = &rxr->rx_buffers[i];
3492                 if (rxbuf->m_head != NULL) {
3493                         bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3494                             BUS_DMASYNC_POSTREAD);
3495                         bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3496                         m_freem(rxbuf->m_head);
3497                         rxbuf->m_head = NULL;
3498                 }
3499         }
3500         return (ENOBUFS);
3501 }
3502
3503 /*********************************************************************
3504  *
3505  *  Initialize all receive rings.
3506  *
3507  **********************************************************************/
3508 static int
3509 ixgbe_setup_receive_structures(struct adapter *adapter)
3510 {
3511         struct rx_ring *rxr = adapter->rx_rings;
3512         int j;
3513
3514         for (j = 0; j < adapter->num_rx_queues; j++, rxr++)
3515                 if (ixgbe_setup_receive_ring(rxr))
3516                         goto fail;
3517
3518         return (0);
3519 fail:
3520         /*
3521          * Free RX buffers allocated so far; we only need to handle
3522          * the rings that completed, since the failing case will have
3523          * cleaned up for itself. 'j' is the failing index, so it is the terminus.
3524          */
3525         for (int i = 0; i < j; ++i) {
3526                 rxr = &adapter->rx_rings[i];
3527                 for (int n = 0; n < adapter->num_rx_desc; n++) {
3528                         struct ixgbe_rx_buf *rxbuf;
3529                         rxbuf = &rxr->rx_buffers[n];
3530                         if (rxbuf->m_head != NULL) {
3531                                 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3532                                   BUS_DMASYNC_POSTREAD);
3533                                 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3534                                 m_freem(rxbuf->m_head);
3535                                 rxbuf->m_head = NULL;
3536                         }
3537                 }
3538         }
3539
3540         return (ENOBUFS);
3541 }
3542
3543 /*********************************************************************
3544  *
3545  *  Setup receive registers and features.
3546  *
3547  **********************************************************************/
3548 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3549
3550 static void
3551 ixgbe_initialize_receive_units(struct adapter *adapter)
3552 {
3553         struct  rx_ring *rxr = adapter->rx_rings;
3554         struct ixgbe_hw *hw = &adapter->hw;
3555         struct ifnet   *ifp = adapter->ifp;
3556         u32             rxctrl, fctrl, srrctl, rxcsum;
3557         u32             reta, mrqc = 0, hlreg, random[10];
3558
3559
3560         /*
3561          * Make sure receives are disabled while
3562          * setting up the descriptor ring
3563          */
3564         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3565         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3566             rxctrl & ~IXGBE_RXCTRL_RXEN);
3567
3568         /* Enable broadcasts */
3569         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3570         fctrl |= IXGBE_FCTRL_BAM;
3571         fctrl |= IXGBE_FCTRL_DPF;
3572         fctrl |= IXGBE_FCTRL_PMCF;
3573         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3574
3575         srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(0));
3576         srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3577         srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3578
3579         hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3580         /* Set for Jumbo Frames? */
3581         if (ifp->if_mtu > ETHERMTU) {
3582                 hlreg |= IXGBE_HLREG0_JUMBOEN;
3583                 srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3584         } else {
3585                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3586                 srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3587         }
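        /*
        ** BSIZEPKT is programmed in 1KB granularity, hence the
        ** shift: 4KB buffers for jumbo frames, 2KB otherwise.
        */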
3588         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3589
3590         if (ixgbe_rx_hdr_split) {
3591                 /* Use a standard mbuf for the header */
3592                 srrctl |= ((IXGBE_RX_HDR << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3593                     & IXGBE_SRRCTL_BSIZEHDR_MASK);
3594                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3595                 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3596                         /* PSRTYPE must be initialized in 82599 */
3597                         u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
3598                                       IXGBE_PSRTYPE_UDPHDR |
3599                                       IXGBE_PSRTYPE_IPV4HDR |
3600                                       IXGBE_PSRTYPE_IPV6HDR;
3601                         psrtype |= (7 << 29);
3602                         IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
3603                 }
3604         } else
3605                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3606
3607         if (adapter->hw.mac.type == ixgbe_mac_82599EB)
3608                 srrctl |= IXGBE_SRRCTL_DROP_EN;
3609
3610         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(0), srrctl);
3611
3612         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3613                 u64 rdba = rxr->rxdma.dma_paddr;
3614                 /* Setup the Base and Length of the Rx Descriptor Ring */
3615                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
3616                                (rdba & 0x00000000ffffffffULL));
3617                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
3618                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
3619                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3620
3621                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3622                 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
3623                 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
3624         }
3625
3626         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3627
3628         /* Setup RSS */
3629         if (adapter->num_rx_queues > 1) {
3630                 int i, j;
3631                 reta = 0;
3632
3633                 /* set up random bits */
3634                 arc4rand(&random, sizeof(random), 0);
3635
3636                 /* Set up the redirection table */
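                /*
                ** Each 32-bit RETA register holds four 8-bit entries;
                ** 'reta' acts as a 4-entry shift register and is
                ** written out every fourth iteration ((i & 3) == 3).
                */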
3637                 for (i = 0, j = 0; i < 128; i++, j++) {
3638                         if (j == adapter->num_rx_queues) j = 0;
3639                         reta = (reta << 8) | (j * 0x11);
3640                         if ((i & 3) == 3)
3641                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3642                 }
3643
3644                 /* Now fill our hash function seeds */
3645                 for (int i = 0; i < 10; i++)
3646                         IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
3647
3648                 /* Perform hash on these packet types */
3649                 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
3650                         mrqc = IXGBE_MRQC_VMDQRSS32EN;
3651
3652                 mrqc |= IXGBE_MRQC_RSSEN
3653                      | IXGBE_MRQC_RSS_FIELD_IPV4
3654                      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3655                      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
3656                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
3657                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX
3658                      | IXGBE_MRQC_RSS_FIELD_IPV6
3659                      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
3660                      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
3661                      | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
3662                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3663
3664                 /* RSS and RX IPP Checksum are mutually exclusive */
3665                 rxcsum |= IXGBE_RXCSUM_PCSD;
3666         }
3667
3668         if (ifp->if_capenable & IFCAP_RXCSUM)
3669                 rxcsum |= IXGBE_RXCSUM_PCSD;
3670
3671         if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3672                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3673
3674         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3675
3676         if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3677                 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3678                 rdrxctl |= IXGBE_RDRXCTL_AGGDIS;
3679                 rdrxctl |= IXGBE_RDRXCTL_RSCLLIDIS;
3680                 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3681                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3682         }
3683
3684         return;
3685 }
3686
3687 /*********************************************************************
3688  *
3689  *  Free all receive rings.
3690  *
3691  **********************************************************************/
3692 static void
3693 ixgbe_free_receive_structures(struct adapter *adapter)
3694 {
3695         struct rx_ring *rxr = adapter->rx_rings;
3696
3697         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3698                 struct lro_ctrl         *lro = &rxr->lro;
3699                 ixgbe_free_receive_buffers(rxr);
3700                 /* Free LRO memory */
3701                 tcp_lro_free(lro);
3702                 /* Free the ring memory as well */
3703                 ixgbe_dma_free(adapter, &rxr->rxdma);
3704         }
3705
3706         free(adapter->rx_rings, M_DEVBUF);
3707 }
3708
3709 /*********************************************************************
3710  *
3711  *  Free receive ring data structures
3712  *
3713  **********************************************************************/
3714 void
3715 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3716 {
3717         struct adapter          *adapter = NULL;
3718         struct ixgbe_rx_buf     *rxbuf = NULL;
3719
3720         INIT_DEBUGOUT("free_receive_buffers: begin");
3721         adapter = rxr->adapter;
3722         if (rxr->rx_buffers != NULL) {
3723                 rxbuf = &rxr->rx_buffers[0];
3724                 for (int i = 0; i < adapter->num_rx_desc; i++) {
3725                         if (rxbuf->map != NULL) {
3726                                 bus_dmamap_sync(rxr->rxtag, rxbuf->map,
3727                                     BUS_DMASYNC_POSTREAD);
3728                                 bus_dmamap_unload(rxr->rxtag, rxbuf->map);
3729                                 bus_dmamap_destroy(rxr->rxtag, rxbuf->map);
3730                         }
3731                         if (rxbuf->m_head != NULL) {
3732                                 m_freem(rxbuf->m_head);
3733                         }
3734                         rxbuf->m_head = NULL;
3735                         ++rxbuf;
3736                 }
3737         }
3738         if (rxr->rx_buffers != NULL) {
3739                 free(rxr->rx_buffers, M_DEVBUF);
3740                 rxr->rx_buffers = NULL;
3741         }
3742         if (rxr->rxtag != NULL) {
3743                 bus_dma_tag_destroy(rxr->rxtag);
3744                 rxr->rxtag = NULL;
3745         }
3746         return;
3747 }
3748
3749 /*********************************************************************
3750  *
3751  *  This routine executes in interrupt context. It replenishes
3752  *  the mbufs in the descriptor and sends data which has been
3753  *  dma'ed into host memory to upper layer.
3754  *
3755  *  We loop at most count times if count is > 0, or until done if
3756  *  count < 0.
3757  *
3758  *  Return TRUE for more work, FALSE for all clean.
3759  *********************************************************************/
3760 static bool
3761 ixgbe_rxeof(struct rx_ring *rxr, int count)
3762 {
3763         struct adapter          *adapter = rxr->adapter;
3764         struct ifnet            *ifp = adapter->ifp;
3765         struct lro_ctrl         *lro = &rxr->lro;
3766         struct lro_entry        *queued;
3767         int                     i;
3768         u32                     staterr;
3769         union ixgbe_adv_rx_desc *cur;
3770
3771
3772         IXGBE_RX_LOCK(rxr);
3773         i = rxr->next_to_check;
3774         cur = &rxr->rx_base[i];
3775         staterr = cur->wb.upper.status_error;
3776
3777         if (!(staterr & IXGBE_RXD_STAT_DD)) {
3778                 IXGBE_RX_UNLOCK(rxr);
3779                 return FALSE;
3780         }
3781
3782         /* Sync the ring */
3783         bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3784             BUS_DMASYNC_POSTREAD);
3785
3786         while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3787             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3788                 struct mbuf     *sendmp, *mh, *mp;
3789                 u16             hlen, plen, hdr;        
3790                 u8              dopayload, accept_frame, eop;
3791
3792
3793                 accept_frame = 1;
3794                 hlen = plen = 0;
3795                 sendmp = mh = mp = NULL;
3796
3797                 /* Sync the buffers */
3798                 bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[i].map,
3799                             BUS_DMASYNC_POSTREAD);
3800
3801                 /*
3802                 ** The way the hardware is configured to
3803                 ** split, it will ONLY use the header buffer
3804                 ** when header split is enabled, otherwise we
3805                 ** get normal behavior, ie, both header and
3806                 ** payload are DMA'd into the payload buffer.
3807                 **
3808                 ** The fmp test is to catch the case where a
3809                 ** packet spans multiple descriptors, in that
3810                 ** case only the first header is valid.
3811                 */
3812                 if ((ixgbe_rx_hdr_split) && (rxr->fmp == NULL)) {
3813                         hdr = le16toh(cur->
3814                             wb.lower.lo_dword.hs_rss.hdr_info);
3815                         hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3816                             IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3817                         if (hlen > IXGBE_RX_HDR)
3818                                 hlen = IXGBE_RX_HDR;
3819                         plen = le16toh(cur->wb.upper.length);
3820                         /* Handle the header mbuf */
3821                         mh = rxr->rx_buffers[i].m_head;
3822                         mh->m_len = hlen;
3823                         dopayload = IXGBE_CLEAN_HDR;
3824                         /*
3825                         ** Get the payload length, this
3826                         ** could be zero if its a small
3827                         ** packet.
3828                         */
3829                         if (plen) {
3830                                 mp = rxr->rx_buffers[i].m_pack;
3831                                 mp->m_len = plen;
3832                                 mp->m_next = NULL;
3833                                 mp->m_flags &= ~M_PKTHDR;
3834                                 mh->m_next = mp;
3835                                 mh->m_flags |= M_PKTHDR;
3836                                 dopayload = IXGBE_CLEAN_ALL;
3837                                 rxr->rx_split_packets++;
3838                         } else {  /* small packets */
3839                                 mh->m_flags &= ~M_PKTHDR;
3840                                 mh->m_next = NULL;
3841                         }
3842                 } else {
3843                         /*
3844                         ** Either no header split, or a
3845                         ** secondary piece of a fragmented
3846                         ** split packet.
3847                         */
3848                         mh = rxr->rx_buffers[i].m_pack;
3849                         mh->m_flags |= M_PKTHDR;
3850                         mh->m_len = le16toh(cur->wb.upper.length);
3851                         dopayload = IXGBE_CLEAN_PKT;
3852                 }
3853
3854                 if (staterr & IXGBE_RXD_STAT_EOP) {
3855                         count--;
3856                         eop = 1;
3857                 } else 
3858                         eop = 0;
3859
3860                 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3861                         accept_frame = 0;
3862
3863                 if (accept_frame) {
3864                         if (ixgbe_get_buf(rxr, i, dopayload) != 0) {
3865                                 ifp->if_iqdrops++;
3866                                 goto discard;
3867                         }
3868                         /* Initial frame - setup */
3869                         if (rxr->fmp == NULL) {
3870                                 mh->m_flags |= M_PKTHDR;
3871                                 mh->m_pkthdr.len = mh->m_len;
3872                                 rxr->fmp = mh; /* Store the first mbuf */
3873                                 rxr->lmp = mh;
3874                                 if (mp) { /* Add payload if split */
3875                                         mh->m_pkthdr.len += mp->m_len;
3876                                         rxr->lmp = mh->m_next;
3877                                 }
3878                         } else {
3879                                 /* Chain mbuf's together */
3880                                 mh->m_flags &= ~M_PKTHDR;
3881                                 rxr->lmp->m_next = mh;
3882                                 rxr->lmp = rxr->lmp->m_next;
3883                                 rxr->fmp->m_pkthdr.len += mh->m_len;
3884                         }
3885
3886                         if (eop) {
3887                                 rxr->fmp->m_pkthdr.rcvif = ifp;
3888                                 ifp->if_ipackets++;
3889                                 rxr->rx_packets++;
3890                                 /* capture data for AIM */
3891                                 rxr->bytes += rxr->fmp->m_pkthdr.len;
3892                                 rxr->rx_bytes += rxr->bytes;
3893                                 if (ifp->if_capenable & IFCAP_RXCSUM)
3894                                         ixgbe_rx_checksum(staterr, rxr->fmp);
3895                                 else
3896                                         rxr->fmp->m_pkthdr.csum_flags = 0;
3897                                 if (staterr & IXGBE_RXD_STAT_VP) {
3898 #if __FreeBSD_version >= 700000
3899                                         rxr->fmp->m_pkthdr.ether_vtag =
3900                                             le16toh(cur->wb.upper.vlan);
3901                                         rxr->fmp->m_flags |= M_VLANTAG;
3902 #else
3903                                         VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
3904                                             (le16toh(cur->wb.upper.vlan) &
3905                                             IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
3906 #endif
3907                                 }
3908                                 sendmp = rxr->fmp;
3909                                 rxr->fmp = NULL;
3910                                 rxr->lmp = NULL;
3911                         }
3912                 } else {
3913                         ifp->if_ierrors++;
3914 discard:
3915                         /* Reuse loaded DMA map and just update mbuf chain */
3916                         if (hlen) {
3917                                 mh = rxr->rx_buffers[i].m_head;
3918                                 mh->m_len = MHLEN;
3919                                 mh->m_next = NULL;
3920                         }
3921                         mp = rxr->rx_buffers[i].m_pack;
3922                         mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
3923                         mp->m_data = mp->m_ext.ext_buf;
3924                         mp->m_next = NULL;
3925                         if (adapter->max_frame_size <=
3926                             (MCLBYTES - ETHER_ALIGN))
3927                                 m_adj(mp, ETHER_ALIGN);
3928                         if (rxr->fmp != NULL) {
3929                                 /* handles the whole chain */
3930                                 m_freem(rxr->fmp);
3931                                 rxr->fmp = NULL;
3932                                 rxr->lmp = NULL;
3933                         }
3934                         sendmp = NULL;
3935                 }
3936                 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3937                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3938
3939                 rxr->last_cleaned = i; /* for updating tail */
3940
3941                 if (++i == adapter->num_rx_desc)
3942                         i = 0;
3943
3944                 /*
3945                 ** Now send up to the stack;
3946                 ** note that the value of next_to_check
3947                 ** is safe because we hold the RX lock
3948                 ** through this call.
3949                 */
3950                 if (sendmp != NULL) {
3951                         /* Use LRO if possible */
3952                         if ((!lro->lro_cnt) || (tcp_lro_rx(lro, sendmp, 0)))
3953                                 (*ifp->if_input)(ifp, sendmp);
3954                 }
3955
3956                 /* Get next descriptor */
3957                 cur = &rxr->rx_base[i];
3958                 staterr = cur->wb.upper.status_error;
3959         }
3960         rxr->next_to_check = i;
3961
3962         /* Advance the hardware's receive queue "Tail Pointer" */
3963         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
3964
3965         /*
3966          * Flush any outstanding LRO work
3967          */
3968         while (!SLIST_EMPTY(&lro->lro_active)) {
3969                 queued = SLIST_FIRST(&lro->lro_active);
3970                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3971                 tcp_lro_flush(lro, queued);
3972         }
3973
3974         IXGBE_RX_UNLOCK(rxr);
3975
3976         /*
3977         ** Leaving with more to clean?
3978         ** then schedule another interrupt.
3979         */
3980         if (staterr & IXGBE_RXD_STAT_DD) {
3981                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, rxr->eims);
3982                 return TRUE;
3983         }
3984
3985         return FALSE;
3986 }
3987
3988 /*********************************************************************
3989  *
3990  *  Verify that the hardware indicated that the checksum is valid.
3991  *  Inform the stack about the status of checksum so that stack
3992  *  doesn't spend time verifying the checksum.
3993  *
3994  *********************************************************************/
3995 static void
3996 ixgbe_rx_checksum(u32 staterr, struct mbuf * mp)
3997 {
3998         u16 status = (u16) staterr;
3999         u8  errors = (u8) (staterr >> 24);
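        /*
        ** The writeback status/error word carries status bits in the
        ** low 16 bits and error bits in bits 31:24, hence the casts.
        */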
4000
4001         if (status & IXGBE_RXD_STAT_IPCS) {
4002                 /* Did it pass? */
4003                 if (!(errors & IXGBE_RXD_ERR_IPE)) {
4004                         /* IP Checksum Good */
4005                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4006                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4007
4008                 } else
4009                         mp->m_pkthdr.csum_flags = 0;
4010         }
4011         if (status & IXGBE_RXD_STAT_L4CS) {
4012                 /* Did it pass? */
4013                 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
4014                         mp->m_pkthdr.csum_flags |=
4015                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4016                         mp->m_pkthdr.csum_data = htons(0xffff);
4017                 } 
4018         }
4019         return;
4020 }
4021
4022
4023 #ifdef IXGBE_HW_VLAN_SUPPORT
4024 /*
4025  * This routine is run via a vlan
4026  * config EVENT
4027  */
4028 static void
4029 ixgbe_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4030 {
4031         struct adapter  *adapter = ifp->if_softc;
4032         u32             ctrl;
4033
4034         ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
4035         ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
4036         ctrl &= ~IXGBE_VLNCTRL_CFIEN;
4037         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
4038
4039         /* Make entry in the hardware filter table */
4040         ixgbe_set_vfta(&adapter->hw, vtag, 0, TRUE);
4041 }
4042
4043 /*
4044  * This routine is run via a vlan
4045  * unconfig EVENT
4046  */
4047 static void
4048 ixgbe_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
4049 {
4050         struct adapter  *adapter = ifp->if_softc;
4052
4053         /* Remove entry in the hardware filter table */
4054         ixgbe_set_vfta(&adapter->hw, vtag, 0, FALSE);
4055
4056         /* Have all vlans unregistered? */
4057         if (adapter->ifp->if_vlantrunk == NULL) {
4058                 u32 ctrl;
4059                 /* Turn off the filter table */
4060                 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
4061                 ctrl &= ~IXGBE_VLNCTRL_VME;
4062                 ctrl &=  ~IXGBE_VLNCTRL_VFE;
4063                 ctrl |= IXGBE_VLNCTRL_CFIEN;
4064                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
4065         }
4066 }
4067 #endif
4068
4069 static void
4070 ixgbe_enable_intr(struct adapter *adapter)
4071 {
4072         struct ixgbe_hw *hw = &adapter->hw;
4073         u32 mask = IXGBE_EIMS_ENABLE_MASK;
4074
4075
4076         /* Enable Fan Failure detection */
4077         if (hw->device_id == IXGBE_DEV_ID_82598AT)
4078                     mask |= IXGBE_EIMS_GPI_SDP1;
4079
4080         /* 82599 specific interrupts */
4081         if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4082                     mask |= IXGBE_EIMS_ECC;
4083                     mask |= IXGBE_EIMS_GPI_SDP1;
4084                     mask |= IXGBE_EIMS_GPI_SDP2;
4085         }
4086
4087         IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4088
4089         /* With RSS we use auto clear */
4090         if (adapter->msix_mem) {
4091                 mask = IXGBE_EIMS_ENABLE_MASK;
4092                 /* Dont autoclear Link */
4093                 mask &= ~IXGBE_EIMS_OTHER;
4094                 mask &= ~IXGBE_EIMS_LSC;
4095                 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4096         }
4097
4098         IXGBE_WRITE_FLUSH(hw);
4099
4100         return;
4101 }
4102
4103 static void
4104 ixgbe_disable_intr(struct adapter *adapter)
4105 {
4106         if (adapter->msix_mem)
4107                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4108         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
4109         IXGBE_WRITE_FLUSH(&adapter->hw);
4110         return;
4111 }
4112
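/*
** PCI config space accessors for the shared hardware-layer code;
** hw->back points at the osdep struct that carries the device_t.
*/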
4113 u16
4114 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
4115 {
4116         u16 value;
4117
4118         value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
4119             reg, 2);
4120
4121         return (value);
4122 }
4123
4124 void
4125 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
4126 {
4127         pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
4128             reg, value, 2);
4129
4130         return;
4131 }
4132
4133 /*
4134 ** Setup the correct IVAR register for a particular MSIX interrupt
4135 **   (yes this is all very magic and confusing :)
4136 **  - entry is the register array entry
4137 **  - vector is the MSIX vector for this queue
4138 **  - type is RX/TX/MISC
4139 */
4140 static void
4141 ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector, s8 type)
4142 {
4143         struct ixgbe_hw *hw = &adapter->hw;
4144         u32 ivar, index;
4145
4146         vector |= IXGBE_IVAR_ALLOC_VAL;
4147
4148         switch (hw->mac.type) {
4149
4150         case ixgbe_mac_82598EB:
4151                 if (type == -1)
4152                         entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4153                 else
4154                         entry += (type * 64);
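                /*
                ** Each 32-bit IVAR register holds four 8-bit entries:
                ** 'index' selects the register and (entry & 0x3) the
                ** byte lane within it.
                */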
4155                 index = (entry >> 2) & 0x1F;
4156                 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4157                 ivar &= ~(0xFF << (8 * (entry & 0x3)));
4158                 ivar |= (vector << (8 * (entry & 0x3)));
4159                 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4160                 break;
4161
4162         case ixgbe_mac_82599EB:
4163                 if (type == -1) { /* MISC IVAR */
4164                         index = (entry & 1) * 8;
4165                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4166                         ivar &= ~(0xFF << index);
4167                         ivar |= (vector << index);
4168                         IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4169                 } else {        /* RX/TX IVARS */
4170                         index = (16 * (entry & 1)) + (8 * type);
4171                         ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4172                         ivar &= ~(0xFF << index);
4173                         ivar |= (vector << index);
4174                         IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4175                 }
                 break;
4176
4177         default:
4178                 break;
4179         }
4180 }
4181
4182 static void
4183 ixgbe_configure_ivars(struct adapter *adapter)
4184 {
4185         struct  tx_ring *txr = adapter->tx_rings;
4186         struct  rx_ring *rxr = adapter->rx_rings;
4187
4188         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++)
4189                 ixgbe_set_ivar(adapter, i, rxr->msix, 0);
4190
4191         for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
4192                 ixgbe_set_ivar(adapter, i, txr->msix, 1);
4193
4194         /* For the Link interrupt */
4195         ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
4196 }
4197
4198 /*
4199 ** ixgbe_sfp_probe - called in the local timer to
4200 ** determine if a port had optics inserted.
4201 */  
4202 static bool
ixgbe_sfp_probe(struct adapter *adapter)
4203 {
4204         struct ixgbe_hw *hw = &adapter->hw;
4205         device_t        dev = adapter->dev;
4206         bool            result = FALSE;
4207
4208         if ((hw->phy.type == ixgbe_phy_nl) &&
4209             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
4210                 s32 ret = hw->phy.ops.identify_sfp(hw);
4211                 if (ret)
4212                         goto out;
4213                 ret = hw->phy.ops.reset(hw);
4214                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4215                         device_printf(dev,"Unsupported SFP+ module detected!");
4216                         printf(" Reload driver with supported module.\n");
4217                         adapter->sfp_probe = FALSE;
4218                         goto out;
4219                 } else
4220                         device_printf(dev,"SFP+ module detected!\n");
4221                 /* We now have supported optics */
4222                 adapter->sfp_probe = FALSE;
4223                 result = TRUE;
4224         }
4225 out:
4226         return (result);
4227 }
4228
/*
** Tasklet handler for MSIX Link interrupts
**  - do outside interrupt since it might sleep
*/
static void
ixgbe_handle_link(void *context, int pending)
{
        struct adapter  *adapter = context;

        ixgbe_check_link(&adapter->hw,
            &adapter->link_speed, &adapter->link_up, 0);
        ixgbe_update_link_status(adapter);
}

/*
** Tasklet for handling SFP module interrupts
*/
static void
ixgbe_handle_mod(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ixgbe_hw *hw = &adapter->hw;
        device_t        dev = adapter->dev;
        s32 err;

        err = hw->phy.ops.identify_sfp(hw);
        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                device_printf(dev,
                    "Unsupported SFP+ module type was detected.\n");
                return;
        }
        hw->mac.ops.setup_sfp(hw);
        taskqueue_enqueue(adapter->tq, &adapter->msf_task);
        return;
}

/*
** Tasklet for handling MSF (multispeed fiber) interrupts
*/
static void
ixgbe_handle_msf(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 autoneg = 0;        /* don't pass stack garbage if the hook is unset */

        if (hw->mac.ops.get_link_capabilities)
                hw->mac.ops.get_link_capabilities(hw, &autoneg,
                                                  &hw->mac.autoneg);
        if (hw->mac.ops.setup_link_speed)
                hw->mac.ops.setup_link_speed(hw, autoneg, TRUE, TRUE);
        ixgbe_check_link(hw,
            &adapter->link_speed, &adapter->link_up, 0);
        ixgbe_update_link_status(adapter);
        return;
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
        struct ifnet   *ifp = adapter->ifp;
        struct ixgbe_hw *hw = &adapter->hw;
        u32  missed_rx = 0, bprc, lxon, lxoff, total;

        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);

        for (int i = 0; i < 8; i++) {
                int mp;
                mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
                missed_rx += mp;
                adapter->stats.mpc[i] += mp;
                adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
        }

        /* Hardware workaround, gprc counts missed packets */
        adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
        adapter->stats.gprc -= missed_rx;

        adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
        adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
        adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);

        /*
         * Workaround: mprc hardware is incorrectly counting
         * broadcasts, so for now we subtract those.
         */
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        adapter->stats.bprc += bprc;
        adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
        adapter->stats.mprc -= bprc;

        adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
        adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
        adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
        adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
        adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
        adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
        adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
        adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

        adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
        adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);

        lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        adapter->stats.lxontxc += lxon;
        lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
        adapter->stats.lxofftxc += lxoff;
        total = lxon + lxoff;

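        /*
         * The hardware counts transmitted XON/XOFF frames in the good,
         * multicast and 64-byte packet counters as well, so subtract
         * them to leave data traffic only.  Pause frames are minimum
         * size frames, hence the total * ETHER_MIN_LEN byte adjustment
         * below.
         */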
        adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
        adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
        adapter->stats.gptc -= total;
        adapter->stats.mptc -= total;
        adapter->stats.ptc64 -= total;
        adapter->stats.gotc -= total * ETHER_MIN_LEN;

        adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
        adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
        adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
        adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
        adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
        adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
        adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

        /* Fill out the OS statistics structure */
        ifp->if_ipackets = adapter->stats.gprc;
        ifp->if_opackets = adapter->stats.gptc;
        ifp->if_ibytes = adapter->stats.gorc;
        ifp->if_obytes = adapter->stats.gotc;
        ifp->if_imcasts = adapter->stats.mprc;
        ifp->if_collisions = 0;

        /* Rx Errors */
        ifp->if_ierrors = missed_rx + adapter->stats.crcerrs +
                adapter->stats.rlec;
}

/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixgbe_print_hw_stats(struct adapter * adapter)
{
        device_t dev = adapter->dev;

        device_printf(dev,"Std Mbuf Failed = %lu\n",
               adapter->mbuf_defrag_failed);
        device_printf(dev,"Missed Packets = %llu\n",
               (long long)adapter->stats.mpc[0]);
        device_printf(dev,"Receive length errors = %llu\n",
               ((long long)adapter->stats.roc +
               (long long)adapter->stats.ruc));
        device_printf(dev,"CRC errors = %llu\n",
               (long long)adapter->stats.crcerrs);
        device_printf(dev,"Driver dropped packets = %lu\n",
               adapter->dropped_pkts);
        device_printf(dev, "Watchdog timeouts = %ld\n",
               adapter->watchdog_events);

        device_printf(dev,"XON Rcvd = %llu\n",
               (long long)adapter->stats.lxonrxc);
        device_printf(dev,"XON Xmtd = %llu\n",
               (long long)adapter->stats.lxontxc);
        device_printf(dev,"XOFF Rcvd = %llu\n",
               (long long)adapter->stats.lxoffrxc);
        device_printf(dev,"XOFF Xmtd = %llu\n",
               (long long)adapter->stats.lxofftxc);

        device_printf(dev,"Total Packets Rcvd = %llu\n",
               (long long)adapter->stats.tpr);
        device_printf(dev,"Good Packets Rcvd = %llu\n",
               (long long)adapter->stats.gprc);
        device_printf(dev,"Good Packets Xmtd = %llu\n",
               (long long)adapter->stats.gptc);
        device_printf(dev,"TSO Transmissions = %lu\n",
               adapter->tso_tx);

        return;
}

/**********************************************************************
 *
 *  This routine is called only when ixgbe_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixgbe_print_debug_info(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct rx_ring *rxr = adapter->rx_rings;
        struct tx_ring *txr = adapter->tx_rings;
        struct ixgbe_hw *hw = &adapter->hw;

        device_printf(dev,"Error Byte Count = %u\n",
            IXGBE_READ_REG(hw, IXGBE_ERRBC));

        for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
                struct lro_ctrl         *lro = &rxr->lro;
                device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
                    i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
                    IXGBE_READ_REG(hw, IXGBE_RDT(i)));
                device_printf(dev,"RX(%d) Packets Received: %lld\n",
                    rxr->me, (long long)rxr->rx_packets);
                device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
                    rxr->me, (long long)rxr->rx_split_packets);
                device_printf(dev,"RX(%d) Bytes Received: %lu\n",
                    rxr->me, (long)rxr->rx_bytes);
                device_printf(dev,"RX(%d) IRQ Handled: %lu\n",
                    rxr->me, (long)rxr->rx_irq);
                device_printf(dev,"RX(%d) LRO Queued = %d\n",
                    rxr->me, lro->lro_queued);
                device_printf(dev,"RX(%d) LRO Flushed = %d\n",
                    rxr->me, lro->lro_flushed);
        }

        for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
                device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i,
                    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
                    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
                device_printf(dev,"TX(%d) Packets Sent: %lu\n",
                    txr->me, (long)txr->total_packets);
                device_printf(dev,"TX(%d) IRQ Handled: %lu\n",
                    txr->me, (long)txr->tx_irq);
                device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
                    txr->me, (long)txr->no_tx_desc_avail);
        }

        device_printf(dev,"Link IRQ Handled: %lu\n",
            (long)adapter->link_irq);
        return;
}

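/*
** Sysctl handlers for the statistics dumps above: a read returns the
** sentinel -1, while writing the value 1 triggers the dump.  Assuming
** the OIDs are registered under the device's sysctl tree at attach
** time, usage would look something like
**	sysctl dev.ix.0.stats=1
** (the exact OID names depend on how the driver registers them).
*/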
static int
ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
        int             error;
        int             result;
        struct adapter *adapter;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);

        if (error || !req->newptr)
                return (error);

        if (result == 1) {
                adapter = (struct adapter *) arg1;
                ixgbe_print_hw_stats(adapter);
        }
        return (error);
}

static int
ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
        int error, result;
        struct adapter *adapter;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);

        if (error || !req->newptr)
                return (error);

        if (result == 1) {
                adapter = (struct adapter *) arg1;
                ixgbe_print_debug_info(adapter);
        }
        return (error);
}

/*
** Set flow control using sysctl:
** Flow control values:
**      0 - off
**      1 - rx pause
**      2 - tx pause
**      3 - full
*/
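/*
** These numeric values map directly onto the ixgbe_fc_* enum from the
** shared code; any unrecognized value falls back to ixgbe_fc_none in
** the switch below, and the requested mode is applied immediately via
** ixgbe_fc_enable().
*/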
static int
ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
        int error;
        struct adapter *adapter;

        error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);

        if (error)
                return (error);

        adapter = (struct adapter *) arg1;
        switch (ixgbe_flow_control) {
                case ixgbe_fc_rx_pause:
                case ixgbe_fc_tx_pause:
                case ixgbe_fc_full:
                        adapter->hw.fc.requested_mode = ixgbe_flow_control;
                        break;
                case ixgbe_fc_none:
                default:
                        adapter->hw.fc.requested_mode = ixgbe_fc_none;
        }

        ixgbe_fc_enable(&adapter->hw, 0);
        return (error);
}

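/*
** Convenience wrapper: seed *limit with the given default and expose
** it as a read/write integer sysctl on the device's tree so the RX
** processing limit can be tuned at runtime.
*/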
static void
ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
        const char *description, int *limit, int value)
{
        *limit = value;
        SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}

#ifdef IXGBE_TIMESYNC
/*
 * Initialize the Time Sync Feature
 */
static int
ixgbe_tsync_init(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        u32             tx_ctl, rx_ctl;

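        /*
         * TIMINCA: bits 31:24 hold the increment period (1 here) and
         * the low bits the increment value.  20833 picoseconds is one
         * period of a ~48 MHz reference clock (1 / 48 MHz ~= 20833 ps),
         * written here in PICOSECS_PER_TICK units (PICOSECS_PER_TICK
         * is assumed to be defined in ixgbe.h).
         */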
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIMINCA, (1<<24) |
            20833/PICOSECS_PER_TICK);

        adapter->last_stamp = IXGBE_READ_REG(&adapter->hw, IXGBE_SYSTIML);
        adapter->last_stamp |= (u64)IXGBE_READ_REG(&adapter->hw,
            IXGBE_SYSTIMH) << 32ULL;

        /* Enable the TX side */
        tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
        tx_ctl |= 0x10;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCTXCTL, tx_ctl);
        IXGBE_WRITE_FLUSH(&adapter->hw);

        tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
        if ((tx_ctl & 0x10) == 0) {
                device_printf(dev, "Failed to enable TX timestamping\n");
                return (ENXIO);
        }

        /* Enable RX */
        rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
        rx_ctl |= 0x10; /* Enable the feature */
        rx_ctl |= 0x04; /* This value turns on Ver 1 and 2 */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCRXCTL, rx_ctl);

        /*
         * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 (Ethertype)
         * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable filter)
         * Ethertype Filter Queue Filter[0][31] = 0x1 (Enable Timestamping)
         */
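        /* i.e. (1 << 31) | (1 << 30) | 0x88F7, the IEEE 1588 ethertype */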
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), 0xC00088f7);

        IXGBE_WRITE_FLUSH(&adapter->hw);

        rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
        if ((rx_ctl & 0x10) == 0) {
                device_printf(dev, "Failed to enable RX timestamping\n");
                return (ENXIO);
        }

        device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n");

        return (0);
}

/*
 * Disable the Time Sync Feature
 */
static void
ixgbe_tsync_disable(struct adapter *adapter)
{
        u32             tx_ctl, rx_ctl;

        tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
        tx_ctl &= ~0x10;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCTXCTL, tx_ctl);
        IXGBE_WRITE_FLUSH(&adapter->hw);

        /* Invalidate TX Timestamp */
        IXGBE_READ_REG(&adapter->hw, IXGBE_TXSTMPH);

        tx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCTXCTL);
        if (tx_ctl & 0x10)
                HW_DEBUGOUT("Failed to disable TX timestamping\n");

        rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
        rx_ctl &= ~0x10;

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSYNCRXCTL, rx_ctl);
        IXGBE_WRITE_FLUSH(&adapter->hw);

        /* Invalidate RX Timestamp */
        IXGBE_READ_REG(&adapter->hw, IXGBE_RXSATRH);

        rx_ctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TSYNCRXCTL);
        if (rx_ctl & 0x10)
                HW_DEBUGOUT("Failed to disable RX timestamping\n");

        return;
}

#endif /* IXGBE_TIMESYNC */