/*******************************************************************************

Copyright (c) 2001-2007, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
/* $FreeBSD$ */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include "ixgbe.h"

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "1.2.6";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select the devices to attach to.
 *  The last field stores an index into ixgbe_strings.
 *  The last entry must be all 0s.
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
        "Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct ifnet *);
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
static void     ixgbe_watchdog(struct adapter *);
static void     ixgbe_init(void *);
static void     ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static int      ixgbe_hardware_init(struct adapter *);
static void     ixgbe_setup_interface(device_t, struct adapter *);
static int      ixgbe_allocate_queues(struct adapter *);
static int      ixgbe_allocate_msix_resources(struct adapter *);
#if __FreeBSD_version >= 700000
static int      ixgbe_setup_msix(struct adapter *);
#endif
static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int      ixgbe_setup_transmit_structures(struct adapter *);
static void     ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int      ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static bool     ixgbe_txeof(struct tx_ring *);
static int      ixgbe_rxeof(struct rx_ring *, int);
static void     ixgbe_rx_checksum(struct adapter *, uint32_t, struct mbuf *);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_disable_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_print_hw_stats(struct adapter *);
static void     ixgbe_print_debug_info(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static int      ixgbe_get_buf(struct rx_ring *, int);
static void     ixgbe_enable_vlans(struct adapter * adapter);
static int      ixgbe_encap(struct adapter *, struct mbuf **);
static int      ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixgbe_dma_malloc(struct adapter *, bus_size_t,
                    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void     ixgbe_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static boolean_t ixgbe_tx_csum_setup(struct tx_ring *, struct mbuf *);
static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
static void     ixgbe_set_ivar(struct adapter *, u16, u8);
static void     ixgbe_configure_ivars(struct adapter *);

/* Legacy Fast Interrupt routine and handlers */
#if __FreeBSD_version >= 700000
static int      ixgbe_fast_irq(void *);
/* The MSI/X Interrupt handlers */
static void     ixgbe_msix_tx(void *);
static void     ixgbe_msix_rx(void *);
static void     ixgbe_msix_link(void *);
#else
static void     ixgbe_fast_irq(void *);
#endif

static void     ixgbe_rxtx(void *context, int pending);
static void     ixgbe_link(void *context, int pending);

#ifndef NO_82598_A0_SUPPORT
static void     desc_flip(void *);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixgbe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixgbe_probe),
        DEVMETHOD(device_attach, ixgbe_attach),
        DEVMETHOD(device_detach, ixgbe_detach),
        DEVMETHOD(device_shutdown, ixgbe_shutdown),
        {0, 0}
};

static driver_t ixgbe_driver = {
        "ix", ixgbe_methods, sizeof(struct adapter),
};

static devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 100;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/* Flow control setting, default to full */
static int ixgbe_flow_control = 3;
TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);

/* Number of TX Queues, note multi tx is not working */
static int ixgbe_tx_queues = 1;
TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues);

/* Number of RX Queues */
static int ixgbe_rx_queues = 8;
TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues);

/* Number of Other Queues, this is used for link interrupts */
static int ixgbe_other_queues = 1;
TUNABLE_INT("hw.ixgbe.other_queues", &ixgbe_other_queues);

/* Number of TX descriptors per ring */
static int ixgbe_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
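
/*
 * The TUNABLE_INT values above are read from the kernel environment
 * when the module initializes, so they can be set from the loader,
 * e.g. in /boot/loader.conf (illustrative values, not recommendations):
 *
 *     hw.ixgbe.rx_process_limit=256
 *     hw.ixgbe.flow_control=3
 *     hw.ixgbe.rxd=1024
 */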

/* Total number of Interfaces - need for config sanity check */
static int ixgbe_total_ports;

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines whether the driver should be loaded on
 *  an adapter, based on the adapter's PCI vendor/device ID.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
        ixgbe_vendor_info_t *ent;

        u_int16_t       pci_vendor_id = 0;
        u_int16_t       pci_device_id = 0;
        u_int16_t       pci_subvendor_id = 0;
        u_int16_t       pci_subdevice_id = 0;
        char            adapter_name[60];

        INIT_DEBUGOUT("ixgbe_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

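        /*
         * Walk the device table; a subvendor or subdevice ID of zero
         * in a table entry matches any subsystem ID (a wildcard), which
         * is why the table entries above leave those fields as 0.
         */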
        ent = ixgbe_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&
                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        sprintf(adapter_name, "%s, Version - %s",
                                ixgbe_strings[ent->index],
                                ixgbe_driver_version);
                        switch (pci_device_id) {
                                case IXGBE_DEV_ID_82598AF_DUAL_PORT :
                                        ixgbe_total_ports += 2;
                                        break;
                                case IXGBE_DEV_ID_82598AF_SINGLE_PORT :
                                        ixgbe_total_ports += 1;
                                default:
                                        break;
                        }
                        device_set_desc_copy(dev, adapter_name);
                        return (0);
                }
                ent++;
        }

        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
        struct adapter *adapter;
        int             error = 0;
        uint32_t        ctrl_ext;
        char            name_string[16];

        INIT_DEBUGOUT("ixgbe_attach: begin");

        /* Allocate, clear, and link in our adapter structure */
        adapter = device_get_softc(dev);
        adapter->dev = adapter->osdep.dev = dev;
        /* General Lock Init */
        snprintf(name_string, sizeof(name_string), "%s:core",
            device_get_nameunit(dev));
        mtx_init(&adapter->core_mtx, name_string, NULL, MTX_DEF);

        /* SYSCTL APIs */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_sysctl_stats, "I", "Statistics");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_sysctl_debug, "I", "Debug Info");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
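
        /*
         * These nodes hang off the device's sysctl tree, so at runtime
         * they appear as, e.g. (assuming the first ix interface):
         *
         *     sysctl dev.ix.0.stats=1
         *     sysctl dev.ix.0.flow_control=3
         */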

        /* Set up the timer callout */
        callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

        /* Determine hardware revision */
        ixgbe_identify_hardware(adapter);

        /* Indicate to RX setup to use Jumbo Clusters */
        adapter->bigbufs = TRUE;

        /* Do base PCI setup - map BAR0 */
        if (ixgbe_allocate_pci_resources(adapter)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_out;
        }

        /* Do descriptor calc and sanity checks */
        if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
                device_printf(dev, "TXD config issue, using default!\n");
                adapter->num_tx_desc = DEFAULT_TXD;
        } else
                adapter->num_tx_desc = ixgbe_txd;
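
        /*
         * Each advanced TX descriptor is 16 bytes, so the ring size in
         * bytes is ixgbe_txd * 16; the check above requires that to be
         * a multiple of DBA_ALIGN. For example, assuming DBA_ALIGN is
         * 128, any multiple of 8 descriptors satisfies the alignment
         * requirement (8 * 16 = 128).
         */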

        /*
        ** With many RX rings it is easy to exceed the
        ** system mbuf allocation. Tuning nmbclusters
        ** can alleviate this.
        */
        if ((adapter->num_rx_queues > 1) && (nmbclusters > 0)) {
                int s;
                /* Calculate the total RX mbuf needs */
                s = (ixgbe_rxd * adapter->num_rx_queues) * ixgbe_total_ports;
                if (s > nmbclusters) {
                        device_printf(dev, "RX Descriptors exceed "
                            "system mbuf max, using default instead!\n");
                        ixgbe_rxd = DEFAULT_RXD;
                }
        }
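
        /*
         * Worked example (illustrative numbers): with 8 RX queues, 1024
         * descriptors per ring, and 2 ports, the driver would want
         * 1024 * 8 * 2 = 16384 clusters; if that exceeds
         * kern.ipc.nmbclusters, the descriptor count falls back to
         * DEFAULT_RXD.
         */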

        if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
                device_printf(dev, "RXD config issue, using default!\n");
                adapter->num_rx_desc = DEFAULT_RXD;
        } else
                adapter->num_rx_desc = ixgbe_rxd;

        /* Allocate our TX/RX Queues */
        if (ixgbe_allocate_queues(adapter)) {
                error = ENOMEM;
                goto err_out;
        }

#if __FreeBSD_version >= 700000
        if (adapter->msix) {
                error = ixgbe_setup_msix(adapter);
                if (error)
                        goto err_out;
        }
#endif

        /* Initialize the shared code */
        if (ixgbe_init_shared_code(&adapter->hw)) {
                device_printf(dev, "Unable to initialize the shared code\n");
                error = EIO;
                goto err_out;
        }

        /* Initialize the hardware */
        if (ixgbe_hardware_init(adapter)) {
                device_printf(dev, "Unable to initialize the hardware\n");
                error = EIO;
                goto err_out;
        }

        /* Setup OS specific network interface */
        ixgbe_setup_interface(dev, adapter);

        /* Sysctl for limiting the amount of work done in the taskqueue */
        ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
            "max number of rx packets to process", &adapter->rx_process_limit,
            ixgbe_rx_process_limit);

        /* Initialize statistics */
        ixgbe_update_stats_counters(adapter);

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

        INIT_DEBUGOUT("ixgbe_attach: end");
        return (0);

err_out:
        ixgbe_free_pci_resources(adapter);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        u32     ctrl_ext;

        INIT_DEBUGOUT("ixgbe_detach: begin");

        /* Make sure VLANs are not using the driver */
#if __FreeBSD_version >= 700000
        if (adapter->ifp->if_vlantrunk != NULL) {
#else
        if (adapter->ifp->if_nvlans != 0) {
#endif
                device_printf(dev, "Vlan in use, detach first\n");
                return (EBUSY);
        }

        mtx_lock(&adapter->core_mtx);
        ixgbe_stop(adapter);
        mtx_unlock(&adapter->core_mtx);

        if (adapter->tq != NULL) {
                taskqueue_drain(adapter->tq, &adapter->rxtx_task);
                taskqueue_drain(taskqueue_fast, &adapter->link_task);
                taskqueue_free(adapter->tq);
                adapter->tq = NULL;
        }

        /* let hardware know driver is unloading */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

        ether_ifdetach(adapter->ifp);
        callout_drain(&adapter->timer);
        ixgbe_free_pci_resources(adapter);
        bus_generic_detach(dev);
        if_free(adapter->ifp);

        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);

        mtx_destroy(&adapter->core_mtx);
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);

        mtx_lock(&adapter->core_mtx);
        ixgbe_stop(adapter);
        mtx_unlock(&adapter->core_mtx);
        return (0);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgbe_start_locked(struct ifnet * ifp)
{
        struct mbuf    *m_head;
        struct adapter *adapter = ifp->if_softc;

        mtx_assert(&adapter->tx_mtx, MA_OWNED);

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING)
                return;
        if (!adapter->link_active)
                return;

        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                if (ixgbe_encap(adapter, &m_head)) {
                        if (m_head == NULL)
                                break;
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                        break;
                }
                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);

                /* Set timeout in case hardware has problems transmitting */
                adapter->watchdog_timer = IXGBE_TX_TIMEOUT;
        }
        return;
}

static void
ixgbe_start(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;

        mtx_lock(&adapter->tx_mtx);
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                ixgbe_start_locked(ifp);
        mtx_unlock(&adapter->tx_mtx);
        return;
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
        int             error = 0;
        struct ifreq   *ifr = (struct ifreq *) data;
        struct ifaddr  *ifa = (struct ifaddr *) data;
        struct adapter *adapter = ifp->if_softc;

        switch (command) {
        case SIOCSIFADDR:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                mtx_lock(&adapter->core_mtx);
                                ixgbe_init_locked(adapter);
                                mtx_unlock(&adapter->core_mtx);
                        }
                        arp_ifinit(ifp, ifa);
                } else
                        ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
                        error = EINVAL;
                } else {
                        mtx_lock(&adapter->core_mtx);
                        ifp->if_mtu = ifr->ifr_mtu;
                        adapter->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                        ixgbe_init_locked(adapter);
                        mtx_unlock(&adapter->core_mtx);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                mtx_lock(&adapter->core_mtx);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ adapter->if_flags) &
                                    IFF_PROMISC) {
                                        ixgbe_disable_promisc(adapter);
                                        ixgbe_set_promisc(adapter);
                                }
                        } else
                                ixgbe_init_locked(adapter);
                } else
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixgbe_stop(adapter);
                adapter->if_flags = ifp->if_flags;
                mtx_unlock(&adapter->core_mtx);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        mtx_lock(&adapter->core_mtx);
                        ixgbe_disable_intr(adapter);
                        ixgbe_set_multi(adapter);
                        ixgbe_enable_intr(adapter);
                        mtx_unlock(&adapter->core_mtx);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        ixgbe_init(adapter);
#if __FreeBSD_version >= 700000
                VLAN_CAPABILITIES(ifp);
#endif
                break;
        }
        default:
                IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
ixgbe_watchdog(struct adapter *adapter)
{

        mtx_assert(&adapter->core_mtx, MA_OWNED);

        /*
         * The timer is set to 5 every time ixgbe_start() queues a packet.
         * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
         * least one descriptor.
         * Finally, anytime all descriptors are clean the timer is
         * set to 0.
         */
        if (adapter->watchdog_timer == 0 || --adapter->watchdog_timer)
                return;

        /*
         * If we are in this routine because of pause frames, then don't
         * reset the hardware.
         */
        if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
                adapter->watchdog_timer = IXGBE_TX_TIMEOUT;
                return;
        }

        device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
        ixgbe_print_debug_info(adapter);

        adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        adapter->watchdog_events++;

        ixgbe_init_locked(adapter);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
        struct ifnet   *ifp = adapter->ifp;
        device_t        dev = adapter->dev;
        u32             txdctl, rxdctl, mhadd;

        INIT_DEBUGOUT("ixgbe_init: begin");

        mtx_assert(&adapter->core_mtx, MA_OWNED);

        ixgbe_stop(adapter);

        /* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
              IXGBE_ETH_LENGTH_OF_ADDRESS);
        ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, 1);
        adapter->hw.addr_ctrl.rar_used_count = 1;

        /* Initialize the hardware */
        if (ixgbe_hardware_init(adapter)) {
                device_printf(dev, "Unable to initialize the hardware\n");
                return;
        }

        if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
                ixgbe_enable_vlans(adapter);

        /* Prepare transmit descriptors and buffers */
        if (ixgbe_setup_transmit_structures(adapter)) {
                device_printf(dev, "Could not setup transmit structures\n");
                ixgbe_stop(adapter);
                return;
        }

        ixgbe_initialize_transmit_units(adapter);

        /* Setup Multicast table */
        ixgbe_set_multi(adapter);

        /*
        ** If we are resetting MTU smaller than 2K
        ** drop to small RX buffers
        */
        if (adapter->max_frame_size <= MCLBYTES)
                adapter->bigbufs = FALSE;

        /* Prepare receive descriptors and buffers */
        if (ixgbe_setup_receive_structures(adapter)) {
                device_printf(dev, "Could not setup receive structures\n");
                ixgbe_stop(adapter);
                return;
        }

        /* Configure RX settings */
        ixgbe_initialize_receive_units(adapter);

        /* Enable Enhanced MSIX mode */
        if (adapter->msix) {
                u32     gpie;
                gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
                gpie |= IXGBE_GPIE_MSIX_MODE;
                gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
                    IXGBE_GPIE_OCD;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
                gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
        }

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_TSO;
        else if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist = (CSUM_TCP | CSUM_UDP);

        /* Set MTU size */
        if (ifp->if_mtu > ETHERMTU) {
                mhadd = IXGBE_READ_REG(&adapter->hw, IXGBE_MHADD);
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
                mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_MHADD, mhadd);
        }
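
        /*
         * The MFS (max frame size) field lives in the upper 16 bits of
         * MHADD (hence the shift by IXGBE_MHADD_MFS_SHIFT). For example,
         * with a 9000-byte MTU the value written would be
         * 9000 + 14 (Ethernet header) + 4 (CRC) = 9018.
         */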

        /* Now enable all the queues */
        for (int i = 0; i < adapter->num_tx_queues; i++) {
                txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
        }

        for (int i = 0; i < adapter->num_rx_queues; i++) {
                rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
        }

        callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

        /* Set up MSI/X routing */
        ixgbe_configure_ivars(adapter);

        ixgbe_enable_intr(adapter);

        /* Now inform the stack we're ready */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        return;
}

static void
ixgbe_init(void *arg)
{
        struct adapter *adapter = arg;

        mtx_lock(&adapter->core_mtx);
        ixgbe_init_locked(adapter);
        mtx_unlock(&adapter->core_mtx);
        return;
}

static void
ixgbe_link(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ifnet *ifp = adapter->ifp;

        mtx_lock(&adapter->core_mtx);
        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                mtx_unlock(&adapter->core_mtx);
                return;
        }

        callout_stop(&adapter->timer);
        ixgbe_update_link_status(adapter);
        callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
        mtx_unlock(&adapter->core_mtx);
}

/*
** MSI and Legacy Deferred Handler
**      - note this runs without the general lock
*/
static void
ixgbe_rxtx(void *context, int pending)
{
        struct adapter  *adapter = context;
        struct ifnet    *ifp = adapter->ifp;
        /* For legacy there is only one of each */
        struct rx_ring *rxr = adapter->rx_rings;
        struct tx_ring *txr = adapter->tx_rings;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0)
                        taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
                mtx_lock(&adapter->tx_mtx);
                ixgbe_txeof(txr);

                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        ixgbe_start_locked(ifp);
                mtx_unlock(&adapter->tx_mtx);
        }

        ixgbe_enable_intr(adapter);
}

/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/
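
/*
 * On FreeBSD 7 and later this is registered as an interrupt filter,
 * so it must not sleep or take regular mutexes. It reads EICR (which
 * clears the pending cause bits on this hardware), masks further
 * interrupts, and defers the actual RX/TX work to the rxtx taskqueue
 * above; link events are handed to the fast taskqueue.
 */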
#if __FreeBSD_version >= 700000
static int
#else
static void
#endif
ixgbe_fast_irq(void *arg)
{
        u32       reg_eicr;
        struct adapter *adapter = arg;

        reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
        if (reg_eicr == 0)
                return FILTER_STRAY;

        ixgbe_disable_intr(adapter);
        taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);

        /* Link status change */
        if (reg_eicr & IXGBE_EICR_LSC)
                taskqueue_enqueue(taskqueue_fast, &adapter->link_task);

        return FILTER_HANDLED;
}

#if __FreeBSD_version >= 700000
/*********************************************************************
 *
 *  MSI TX Interrupt Service routine
 *
 **********************************************************************/

void
ixgbe_msix_tx(void *arg)
{
        struct tx_ring *txr = arg;
        struct adapter *adapter = txr->adapter;
        struct ifnet   *ifp = adapter->ifp;
        uint32_t       loop_cnt = MAX_INTR;

        mtx_lock(&adapter->tx_mtx);

        while (loop_cnt > 0) {
                if (__predict_false(!ixgbe_txeof(txr)))
                        break;
                loop_cnt--;
        }

        if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
            ifp->if_snd.ifq_head != NULL)
                ixgbe_start_locked(ifp);
        ixgbe_enable_intr(adapter);
        mtx_unlock(&adapter->tx_mtx);
        return;
}

/*********************************************************************
 *
 *  MSI RX Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_msix_rx(void *arg)
{
        struct rx_ring  *rxr = arg;
        struct adapter  *adapter = rxr->adapter;
        struct ifnet    *ifp = adapter->ifp;
        uint32_t        loop = MAX_INTR;

        while ((loop-- > 0) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
                ixgbe_rxeof(rxr, adapter->rx_process_limit);

        ixgbe_enable_intr(adapter);
}

static void
ixgbe_msix_link(void *arg)
{
        struct adapter  *adapter = arg;
        uint32_t       reg_eicr;

        mtx_lock(&adapter->core_mtx);

        reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

        if (reg_eicr & IXGBE_EICR_LSC) {
                callout_stop(&adapter->timer);
                ixgbe_update_link_status(adapter);
                callout_reset(&adapter->timer, hz,
                    ixgbe_local_timer, adapter);
        }

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
        ixgbe_enable_intr(adapter);
        mtx_unlock(&adapter->core_mtx);
}
#endif /*  __FreeBSD_version >= 700000 */

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
        struct adapter *adapter = ifp->if_softc;

        INIT_DEBUGOUT("ixgbe_media_status: begin");
        ixgbe_update_link_status(adapter);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        if (!adapter->link_active)
                return;

        ifmr->ifm_status |= IFM_ACTIVE;
        ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;

        return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
static int
ixgbe_media_change(struct ifnet * ifp)
{
        struct adapter *adapter = ifp->if_softc;
        struct ifmedia *ifm = &adapter->media;

        INIT_DEBUGOUT("ixgbe_media_change: begin");

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                return (EINVAL);

        return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *    WARNING: while this code is using an MQ style infrastructure,
 *    it would NOT work as is with more than 1 queue.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp)
{
        u32             olinfo_status = 0, cmd_type_len = 0;
        u32             paylen;
        int             i, j, error, nsegs;
        int             first, last = 0;
        struct mbuf     *m_head;
        bus_dma_segment_t segs[IXGBE_MAX_SCATTER];
        bus_dmamap_t    map;
        struct tx_ring  *txr = adapter->tx_rings;
        struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
        union ixgbe_adv_tx_desc *txd = NULL;

        m_head = *m_headp;
        paylen = 0;

        /* Basic descriptor defines */
        cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
        cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

        if (m_head->m_flags & M_VLANTAG)
                cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

        /*
         * Force a cleanup if number of TX descriptors
         * available is below the threshold. If it fails
         * to get above, then abort transmit.
         */
        if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
                ixgbe_txeof(txr);
                /* Make sure things have improved */
                if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
                        adapter->no_tx_desc_avail1++;
                        return (ENOBUFS);
                }
        }

        /*
         * Important to capture the first descriptor
         * used because it will contain the index of
         * the one we tell the hardware to report back
         */
        first = txr->next_avail_tx_desc;
        txbuf = &txr->tx_buffers[first];
        txbuf_mapped = txbuf;
        map = txbuf->map;

        /*
         * Map the packet for DMA.
         */
        error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
            *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

        if (error == EFBIG) {
                struct mbuf *m;

                m = m_defrag(*m_headp, M_DONTWAIT);
                if (m == NULL) {
                        adapter->mbuf_alloc_failed++;
                        m_freem(*m_headp);
                        *m_headp = NULL;
                        return (ENOBUFS);
                }
                *m_headp = m;

                /* Try it again */
                error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
                    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

                if (error == ENOMEM) {
                        adapter->no_tx_dma_setup++;
                        return (error);
                } else if (error != 0) {
                        adapter->no_tx_dma_setup++;
                        m_freem(*m_headp);
                        *m_headp = NULL;
                        return (error);
                }
        } else if (error == ENOMEM) {
                adapter->no_tx_dma_setup++;
                return (error);
        } else if (error != 0) {
                adapter->no_tx_dma_setup++;
                m_freem(*m_headp);
                *m_headp = NULL;
                return (error);
        }

        /* Make certain there are enough descriptors */
        if (nsegs > txr->tx_avail - 2) {
                adapter->no_tx_desc_avail2++;
                error = ENOBUFS;
                goto encap_fail;
        }
        m_head = *m_headp;

        /*
        ** Set the appropriate offload context
        ** this becomes the first descriptor of
        ** a packet.
        */
        if (ixgbe_tso_setup(txr, m_head, &paylen)) {
                cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
                olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
                olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
                olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
                ++adapter->tso_tx;
        } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
                if (ixgbe_tx_csum_setup(txr, m_head))
                        olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
        }

        i = txr->next_avail_tx_desc;
        for (j = 0; j < nsegs; j++) {
                bus_size_t seglen;
                bus_addr_t segaddr;

                txbuf = &txr->tx_buffers[i];
                txd = &txr->tx_base[i];
                seglen = segs[j].ds_len;
                segaddr = htole64(segs[j].ds_addr);

                txd->read.buffer_addr = segaddr;
                txd->read.cmd_type_len = htole32(txr->txd_cmd |
                    cmd_type_len | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);
                last = i; /* Next descriptor that will get completed */

                if (++i == adapter->num_tx_desc)
                        i = 0;

                txbuf->m_head = NULL;
                txbuf->next_eop = -1;
                /*
                ** we have to do this inside the loop right now
                ** because of the hardware workaround.
                */
                if (j == (nsegs - 1)) /* Last descriptor gets EOP and RS */
                        txd->read.cmd_type_len |=
                            htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
#ifndef NO_82598_A0_SUPPORT
                if (adapter->hw.revision_id == 0)
                        desc_flip(txd);
#endif
        }

        txr->tx_avail -= nsegs;
        txr->next_avail_tx_desc = i;

        txbuf->m_head = m_head;
        txbuf->map = map;
        bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

        /* Set the index of the descriptor that will be marked done */
        txbuf = &txr->tx_buffers[first];
        txbuf->next_eop = last;

        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        /*
         * Advance the Transmit Descriptor Tail (Tdt), this tells the
         * hardware that this frame is available to transmit.
         */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
        return (0);

encap_fail:
        bus_dmamap_unload(txr->txtag, txbuf->map);
        return (error);
}

static void
ixgbe_set_promisc(struct adapter *adapter)
{
        u_int32_t       reg_rctl;
        struct ifnet   *ifp = adapter->ifp;

        reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);

        if (ifp->if_flags & IFF_PROMISC) {
                reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
        } else if (ifp->if_flags & IFF_ALLMULTI) {
                reg_rctl |= IXGBE_FCTRL_MPE;
                reg_rctl &= ~IXGBE_FCTRL_UPE;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
        }
        return;
}

static void
ixgbe_disable_promisc(struct adapter * adapter)
{
        u_int32_t       reg_rctl;

        reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);

        reg_rctl &= (~IXGBE_FCTRL_UPE);
        reg_rctl &= (~IXGBE_FCTRL_MPE);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

        return;
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixgbe_set_multi(struct adapter *adapter)
{
        uint32_t fctrl;
        uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
        struct ifmultiaddr *ifma;
        int             mcnt = 0;
        struct ifnet   *ifp = adapter->ifp;

        IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

        fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
        fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        if (ifp->if_flags & IFF_PROMISC)
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        else if (ifp->if_flags & IFF_ALLMULTI) {
                fctrl |= IXGBE_FCTRL_MPE;
                fctrl &= ~IXGBE_FCTRL_UPE;
        } else
                fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

        IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
                    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
                    IXGBE_ETH_LENGTH_OF_ADDRESS);
                mcnt++;
        }
        IF_ADDR_UNLOCK(ifp);

        ixgbe_update_mc_addr_list(&adapter->hw, mta, mcnt, 0);

        return;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog timer.
 *
 **********************************************************************/

static void
ixgbe_local_timer(void *arg)
{
        struct adapter *adapter = arg;
        struct ifnet   *ifp = adapter->ifp;

        mtx_assert(&adapter->core_mtx, MA_OWNED);

        ixgbe_update_link_status(adapter);
        ixgbe_update_stats_counters(adapter);
        if (ixgbe_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
                ixgbe_print_hw_stats(adapter);
        }
        /*
         * Each second we check the watchdog
         * to protect against hardware hangs.
         */
        ixgbe_watchdog(adapter);

        callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
}

static void
ixgbe_update_link_status(struct adapter *adapter)
{
        uint32_t  link_speed;
        boolean_t link_up = FALSE;
        struct ifnet    *ifp = adapter->ifp;
        device_t dev = adapter->dev;

        ixgbe_check_link(&adapter->hw, &link_speed, &link_up);

        if (link_up) {
                if (adapter->link_active == FALSE) {
                        if (bootverbose)
                                device_printf(dev, "Link is up %d Mbps %s \n",
                                    10000, "Full Duplex");
                        adapter->link_active = TRUE;
                        if_link_state_change(ifp, LINK_STATE_UP);
                }
        } else { /* Link down */
                if (adapter->link_active == TRUE) {
                        if (bootverbose)
                                device_printf(dev, "Link is Down\n");
                        if_link_state_change(ifp, LINK_STATE_DOWN);
                        adapter->link_active = FALSE;
                }
        }

        return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixgbe_stop(void *arg)
{
        struct ifnet   *ifp;
        struct adapter *adapter = arg;
        ifp = adapter->ifp;

        mtx_assert(&adapter->core_mtx, MA_OWNED);

        INIT_DEBUGOUT("ixgbe_stop: begin\n");
        ixgbe_disable_intr(adapter);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        ixgbe_reset_hw(&adapter->hw);
        adapter->hw.adapter_stopped = FALSE;
        ixgbe_stop_adapter(&adapter->hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

        return;
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixgbe_identify_hardware(struct adapter *adapter)
{
        device_t        dev = adapter->dev;

        /* Save off the information about this board */
        adapter->hw.vendor_id = pci_get_vendor(dev);
        adapter->hw.device_id = pci_get_device(dev);
        adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
        adapter->hw.subsystem_vendor_id =
            pci_read_config(dev, PCIR_SUBVEND_0, 2);
        adapter->hw.subsystem_device_id =
            pci_read_config(dev, PCIR_SUBDEV_0, 2);

        return;
}

#if __FreeBSD_version >= 700000
/*********************************************************************
 *
 *  Setup MSIX: this is a prereq for doing Multiqueue/RSS.
 *
 **********************************************************************/
static int
ixgbe_setup_msix(struct adapter *adapter)
{
        device_t        dev = adapter->dev;
        struct          tx_ring *txr = adapter->tx_rings;
        struct          rx_ring *rxr = adapter->rx_rings;
        int             error, vector = 0;

        /* TX setup: the code is here for multi tx,
           there are other parts of the driver not ready for it */
        for (int i = 0; i < adapter->num_tx_queues; i++, vector++, txr++) {
                adapter->res[vector] = bus_alloc_resource_any(dev,
                    SYS_RES_IRQ, &adapter->rid[vector],
                    RF_SHAREABLE | RF_ACTIVE);
                if (!adapter->res[vector]) {
                        device_printf(dev, "Unable to allocate"
                            " bus resource: tx interrupt [%d]\n", vector);
                        return (ENXIO);
                }
                /* Set the handler function */
                error = bus_setup_intr(dev, adapter->res[vector],
                    INTR_TYPE_NET | INTR_MPSAFE, NULL,
                    ixgbe_msix_tx, txr, &adapter->tag[vector]);
                if (error) {
                        adapter->res[vector] = NULL;
                        device_printf(dev, "Failed to register TX handler");
                        return (error);
                }
                adapter->msix++;
        }

        /* RX setup */
        for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rxr++) {
                adapter->res[vector] = bus_alloc_resource_any(dev,
                    SYS_RES_IRQ, &adapter->rid[vector],
                    RF_SHAREABLE | RF_ACTIVE);
                if (!adapter->res[vector]) {
1471                         device_printf(dev,"Unable to allocate"
1472                             " bus resource: rx interrupt [%d],"
1473                             " rid = %d\n", i, adapter->rid[vector]);
1474                         return (ENXIO);
1475                 }
1476                 /* Set the handler function */
1477                 error = bus_setup_intr(dev, adapter->res[vector],
1478                     INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_rx,
1479                     rxr, &adapter->tag[vector]);
1480                 if (error) {
1481                         adapter->res[vector] = NULL;
1482                         device_printf(dev, "Failed to register RX handler\n");
1483                         return (error);
1484                 }
1485                 adapter->msix++;
1486         }
1487
1488         /* Now for Link changes */
1489         adapter->res[vector] = bus_alloc_resource_any(dev,
1490             SYS_RES_IRQ, &adapter->rid[vector], RF_SHAREABLE | RF_ACTIVE);
1491         if (!adapter->res[vector]) {
1492                 device_printf(dev,"Unable to allocate"
1493                     " bus resource: Link interrupt [%d]\n", adapter->rid[vector]);
1494                 return (ENXIO);
1495         }
1496         /* Set the link handler function */
1497         error = bus_setup_intr(dev, adapter->res[vector],
1498             INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_msix_link,
1499             adapter, &adapter->tag[vector]);
1500         if (error) {
1501                 adapter->res[vector] = NULL;
1502                 device_printf(dev, "Failed to register LINK handler\n");
1503                 return (error);
1504         }
1505         adapter->msix++;
1506
1507         return (0);
1508 }
1509 #endif
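
/*
 * Example (illustrative only): with one TX and one RX queue, the
 * vector layout built above is
 *
 *      vector 0 -> ixgbe_msix_tx   (txr[0])
 *      vector 1 -> ixgbe_msix_rx   (rxr[0])
 *      vector 2 -> ixgbe_msix_link (adapter)
 *
 * and adapter->msix ends up counting the vectors actually hooked up.
 */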
1510
1511 static int
1512 ixgbe_allocate_pci_resources(struct adapter *adapter)
1513 {
1514         int             error, rid;
1515         device_t        dev = adapter->dev;
1516
1517         rid = PCIR_BAR(0);
1518         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1519             &rid, RF_ACTIVE);
1520
1521         if (!(adapter->res_memory)) {
1522                 device_printf(dev,"Unable to allocate bus resource: memory\n");
1523                 return (ENXIO);
1524         }
1525
1526         adapter->osdep.mem_bus_space_tag =
1527                 rman_get_bustag(adapter->res_memory);
1528         adapter->osdep.mem_bus_space_handle =
1529                 rman_get_bushandle(adapter->res_memory);
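        /*
         * hw_addr is effectively a cookie here: the shared code reaches
         * the registers through the osdep bus_space tag/handle saved
         * above (hooked up via hw.back at the bottom of this routine).
         */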
1530         adapter->hw.hw_addr = (uint8_t *) &adapter->osdep.mem_bus_space_handle;
1531
1532         /*
1533          * First try to setup MSI/X interrupts,
1534          * if that fails fall back to Legacy.
1535          */
1536         if (ixgbe_allocate_msix_resources(adapter)) {
1537                 int val;
1538
1539                 adapter->num_tx_queues = 1;
1540                 adapter->num_rx_queues = 1;
1541                 val = 0;
1542
1543 #if __FreeBSD_version >= 700000
1544                 /* Attempt to use MSI */
1545                 val = pci_msi_count(dev);
1546                 if ((val) && pci_alloc_msi(dev, &val) == 0) {
1547                         adapter->rid[0] = 1;
1548                         device_printf(dev, "MSI Interrupts enabled\n");
1549                 } else
1550 #endif
1551                 {
1552                         adapter->rid[0] = 0;
1553                         device_printf(dev, "Legacy Interrupts enabled\n");
1554                 }
1555                 adapter->res[0] = bus_alloc_resource_any(dev,
1556                     SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
1557                 if (adapter->res[0] == NULL) {
1558                         device_printf(dev, "Unable to allocate bus "
1559                             "resource: interrupt\n");
1560                         return (ENXIO);
1561                 }
1562                 /* Set the handler contexts */
1563                 TASK_INIT(&adapter->rxtx_task, 0, ixgbe_rxtx, adapter);
1564                 TASK_INIT(&adapter->link_task, 0, ixgbe_link, adapter);
1565                 adapter->tq = taskqueue_create_fast("ix_taskq", M_NOWAIT,
1566                     taskqueue_thread_enqueue, &adapter->tq);
1567                 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
1568                     device_get_nameunit(adapter->dev));
1569 #if __FreeBSD_version < 700000
1570                 error = bus_setup_intr(dev, adapter->res[0],
1571                     INTR_TYPE_NET | INTR_FAST, ixgbe_fast_irq,
1572 #else
1573                 error = bus_setup_intr(dev, adapter->res[0],
1574                     INTR_TYPE_NET, ixgbe_fast_irq, NULL,
1575 #endif
1576                     adapter, &adapter->tag[0]);
1577                 if (error) {
1578                         adapter->res[0] = NULL;
1579                         device_printf(dev, "Failed to register"
1580                             " Fast Legacy handler\n");
1581                         return (error);
1582                 }
1583         }
1584
1585         adapter->hw.back = &adapter->osdep;
1586         return (0);
1587 }
1588
1589 #if __FreeBSD_version >= 700000
1590 /*
1591  * Attempt to configure MSI/X, the preferred
1592  * interrupt option.
1593  */
1594 static int
1595 ixgbe_allocate_msix_resources(struct adapter *adapter)
1596 {
1597         int             error, val, want, rid;
1598         device_t        dev = adapter->dev;
1599         int             vector = 1;
1600
1601
1602         /* First map the MSIX table */
1603         rid = PCIR_BAR(3);
1604         adapter->res_msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1605             &rid, RF_ACTIVE);
1606         if (!adapter->res_msix) {
1607                 device_printf(dev,"Unable to map MSIX table\n");
1608                 return (ENXIO);
1609         }
1610
1611         /* Now figure out how many vectors we need to use */
1612         val = pci_msix_count(dev); 
1613
1614         /* check configured values */
1615         want = ixgbe_tx_queues + ixgbe_rx_queues + ixgbe_other_queues;
1616         /*
1617          *  We aren't going to do anything fancy for now:
1618          *  we either meet the desired config or we fail.
1619          */
1620         if (val >= want) 
1621                 val = want;
1622         else 
1623                 return (ENXIO);
1624
1625         /* Initialize the resource arrays */
1626         for (int i = 0; i < IXGBE_MSGS; i++, vector++) {
1627                 adapter->rid[i] = vector;
1628                 adapter->tag[i] = NULL;
1629                 adapter->res[i] = NULL;
1630         }
1631
1632         adapter->num_tx_queues = ixgbe_tx_queues;
1633         adapter->num_rx_queues = ixgbe_rx_queues;
1634
1635         /* Now allocate the vectors */  
1636         if ((error = pci_alloc_msix(dev, &val)) == 0) {
1637                 adapter->msix = 1;
1638                 device_printf(dev,
1639                     "MSI/X enabled with %d vectors\n", val);
1640         } else {
1641                 device_printf(dev,
1642                     "FAIL pci_alloc_msix() %d\n", error);
1643                 return (error);
1644         }
1645         return (0);
1646 }
1647 #else   /* FreeBSD 6.2 */
1648 static int
1649 ixgbe_allocate_msix_resources(struct adapter *adapter)
1650 {
1651         return (1); /* Force Legacy behavior for 6.2 */
1652 }
1653 #endif
1654
1655 static void
1656 ixgbe_free_pci_resources(struct adapter * adapter)
1657 {
1658         device_t dev = adapter->dev;
1659         int             i, loop;
1660
1661         /*
1662          * Legacy has this set to 0, but we need
1663          * to run this once, so reset it.
1664          */
1665         if (adapter->msix)
1666                 loop = adapter->msix;
1667         else
1668                 loop = 1;
1669         /*
1670          * First release all the interrupt resources:
1671          *      notice that since these are just kept
1672          *      in an array we can do the same logic
1673          *      whether it's MSIX or just legacy.
1674          */
1675         for (i = 0; i < loop; i++) {
1676                 if (adapter->tag[i] != NULL) {
1677                         bus_teardown_intr(dev, adapter->res[i],
1678                             adapter->tag[i]);
1679                         adapter->tag[i] = NULL;
1680                 }
1681                 if (adapter->res[i] != NULL) {
1682                         bus_release_resource(dev, SYS_RES_IRQ,
1683                             adapter->rid[i], adapter->res[i]);
1684                 }
1685         }
1686
1687 #if __FreeBSD_version >= 700000
1688         pci_release_msi(dev);
1689 #endif
1690         if (adapter->res_memory != NULL)
1691                 bus_release_resource(dev, SYS_RES_MEMORY,
1692                     IXGBE_MMBA, adapter->res_memory);
1693
1694         return;
1695 }
1696
1697 /*********************************************************************
1698  *
1699  *  Initialize the hardware to a configuration as specified by the
1700  *  adapter structure. The controller is reset, the EEPROM is
1701  *  verified, the MAC address is set, then the shared initialization
1702  *  routines are called.
1703  *
1704  **********************************************************************/
1705 static int
1706 ixgbe_hardware_init(struct adapter *adapter)
1707 {
1708         device_t dev = adapter->dev;
1709         uint16_t csum;
1710
1711         csum = 0;
1712         /* Issue a global reset */
1713         adapter->hw.adapter_stopped = FALSE;
1714         ixgbe_stop_adapter(&adapter->hw);
1715
1716         /* Make sure we have a good EEPROM before we read from it */
1717         if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
1718                 device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
1719                 return (EIO);
1720         }
1721
1722         /* Set default Hardware Flow Control settings */
1723         adapter->hw.fc.original_type = ixgbe_fc_full;
1724         adapter->hw.fc.pause_time = IXGBE_FC_PAUSE;
1725         adapter->hw.fc.low_water = IXGBE_FC_LO;
1726         adapter->hw.fc.high_water = IXGBE_FC_HI;
1727         adapter->hw.fc.send_xon = TRUE;
1728
1729         if (ixgbe_init_hw(&adapter->hw)) {
1730                 device_printf(dev,"Hardware Initialization Failed\n");
1731                 return (EIO);
1732         }
1733
1734         return (0);
1735 }
1736
1737 /*********************************************************************
1738  *
1739  *  Setup networking device structure and register an interface.
1740  *
1741  **********************************************************************/
1742 static void
1743 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1744 {
1745         struct ifnet   *ifp;
1746         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1747
1748         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1749         if (ifp == NULL)
1750                 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
1751         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1752         ifp->if_mtu = ETHERMTU;
1753         ifp->if_baudrate = 1000000000;
1754         ifp->if_init = ixgbe_init;
1755         ifp->if_softc = adapter;
1756         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1757         ifp->if_ioctl = ixgbe_ioctl;
1758         ifp->if_start = ixgbe_start;
1759         ifp->if_timer = 0;
1760         ifp->if_watchdog = NULL;
1761         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1762
1763         ether_ifattach(ifp, adapter->hw.mac.addr);
1764
1765         adapter->max_frame_size =
1766             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1767
1768         /*
1769          * Tell the upper layer(s) we support long frames.
1770          */
1771         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1772
1773         if (adapter->msix) /* RSS and HWCSUM not compatible */
1774                 ifp->if_capabilities |= IFCAP_TSO4;
1775         else
1776                 ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4);
1777         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1778         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1779
1780         ifp->if_capenable = ifp->if_capabilities;
1781
1782         /*
1783          * Specify the media types supported by this adapter and register
1784          * callbacks to update media and link information
1785          */
1786         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1787                      ixgbe_media_status);
1788         ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR |
1789                     IFM_FDX, 0, NULL);
1790         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1791         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1792
1793         return;
1794 }
1795
1796 /********************************************************************
1797  * Manage DMA'able memory.
1798  *******************************************************************/
1799 static void
1800 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1801 {
1802         if (error)
1803                 return;
1804         *(bus_addr_t *) arg = segs->ds_addr;
1805         return;
1806 }
1807
1808 static int
1809 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
1810                 struct ixgbe_dma_alloc *dma, int mapflags)
1811 {
1812         device_t dev = adapter->dev;
1813         int             r;
1814
1815         r = bus_dma_tag_create(NULL,    /* parent */
1816                                PAGE_SIZE, 0,    /* alignment, bounds */
1817                                BUS_SPACE_MAXADDR,       /* lowaddr */
1818                                BUS_SPACE_MAXADDR,       /* highaddr */
1819                                NULL, NULL,      /* filter, filterarg */
1820                                size,    /* maxsize */
1821                                1,       /* nsegments */
1822                                size,    /* maxsegsize */
1823                                BUS_DMA_ALLOCNOW,        /* flags */
1824                                NULL,    /* lockfunc */
1825                                NULL,    /* lockfuncarg */
1826                                &dma->dma_tag);
1827         if (r != 0) {
1828                 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
1829                        "error %u\n", r);
1830                 goto fail_0;
1831         }
1832         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1833                              BUS_DMA_NOWAIT, &dma->dma_map);
1834         if (r != 0) {
1835                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1836                        "error %u\n", r);
1837                 goto fail_1;
1838         }
1839         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1840                             size,
1841                             ixgbe_dmamap_cb,
1842                             &dma->dma_paddr,
1843                             mapflags | BUS_DMA_NOWAIT);
1844         if (r != 0) {
1845                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
1846                        "error %u\n", r);
1847                 goto fail_2;
1848         }
1849         dma->dma_size = size;
1850         return (0);
1851 fail_2:
1852         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1853 fail_1:
1854         bus_dma_tag_destroy(dma->dma_tag);
1855 fail_0:
1856         dma->dma_map = NULL;
1857         dma->dma_tag = NULL;
1858         return (r);
1859 }
1860
1861 static void
1862 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
1863 {
1864         bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1865             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1866         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1867         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1868         bus_dma_tag_destroy(dma->dma_tag);
1869 }
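
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * allocates a DMA'able area for a descriptor ring and later releases
 * it with the matching free routine:
 *
 *      struct ixgbe_dma_alloc dma;
 *
 *      if (ixgbe_dma_malloc(adapter, tsize, &dma, BUS_DMA_NOWAIT) == 0) {
 *              ring = (union ixgbe_adv_tx_desc *)dma.dma_vaddr;
 *              ... program dma.dma_paddr into the hardware ...
 *              ixgbe_dma_free(adapter, &dma);
 *      }
 */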
1870
1871
1872 /*********************************************************************
1873  *
1874  *  Allocate memory for the transmit and receive rings, and then
1875  *  the descriptors associated with each, called only once at attach.
1876  *
1877  **********************************************************************/
1878 static int
1879 ixgbe_allocate_queues(struct adapter *adapter)
1880 {
1881         device_t dev = adapter->dev;
1882         struct tx_ring *txr;
1883         struct rx_ring *rxr;
1884         int rsize, tsize, error = IXGBE_SUCCESS;
1885         int txconf = 0, rxconf = 0;
1886
1887         /* First allocate the TX ring struct memory */
1888         if (!(adapter->tx_rings =
1889             (struct tx_ring *) malloc(sizeof(struct tx_ring) *
1890             adapter->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1891                 device_printf(dev, "Unable to allocate TX ring memory\n");
1892                 error = ENOMEM;
1893                 goto fail;
1894         }
1895         txr = adapter->tx_rings;
1896
1897         /* Next allocate the RX */
1898         if (!(adapter->rx_rings =
1899             (struct rx_ring *) malloc(sizeof(struct rx_ring) *
1900             adapter->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1901                 device_printf(dev, "Unable to allocate RX ring memory\n");
1902                 error = ENOMEM;
1903                 goto rx_fail;
1904         }
1905         rxr = adapter->rx_rings;
1906
1907         tsize = roundup2(adapter->num_tx_desc *
1908             sizeof(union ixgbe_adv_tx_desc), 4096);
1909         /*
1910          * Now set up the TX queues, txconf is needed to handle the
1911          * possibility that things fail midcourse and we need to
1912          * undo memory gracefully
1913          */ 
1914         for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) {
1915                 char    name_string[16];
1916                 /* Set up some basics */
1917                 txr = &adapter->tx_rings[i];
1918                 txr->adapter = adapter;
1919                 txr->me = i;
1920                 /*
1921                  * Initialize the TX side lock
1922                  *  -this has to change for multi tx
1923                  */
1924                 snprintf(name_string, sizeof(name_string), "%s:tx",
1925                     device_get_nameunit(dev));
1926                 mtx_init(&adapter->tx_mtx, name_string, NULL, MTX_DEF);
1927
1928                 if (ixgbe_dma_malloc(adapter, tsize,
1929                         &txr->txdma, BUS_DMA_NOWAIT)) {
1930                         device_printf(dev,
1931                             "Unable to allocate TX Descriptor memory\n");
1932                         error = ENOMEM;
1933                         goto err_tx_desc;
1934                 }
1935                 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1936                 bzero((void *)txr->tx_base, tsize);
1937
1938                 /* Now allocate transmit buffers for the ring */
1939                 if (ixgbe_allocate_transmit_buffers(txr)) {
1940                         device_printf(dev,
1941                             "Critical Failure setting up transmit buffers\n");
1942                         error = ENOMEM;
1943                         goto err_tx_desc;
1944                 }
1945
1946         }
1947
1948         /*
1949          * Next the RX queues...
1950          */ 
1951         rsize = roundup2(adapter->num_rx_desc *
1952             sizeof(union ixgbe_adv_rx_desc), 4096);
1953         for (int i = 0; i < adapter->num_rx_queues; i++, rxconf++) {
1954                 rxr = &adapter->rx_rings[i];
1955                 /* Set up some basics */
1956                 rxr->adapter = adapter;
1957                 rxr->me = i;
1958
1959                 if (ixgbe_dma_malloc(adapter, rsize,
1960                         &rxr->rxdma, BUS_DMA_NOWAIT)) {
1961                         device_printf(dev,
1962                             "Unable to allocate RxDescriptor memory\n");
1963                         error = ENOMEM;
1964                         goto err_rx_desc;
1965                 }
1966                 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1967                 bzero((void *)rxr->rx_base, rsize);
1968
1969                 /* Allocate receive buffers for the ring*/
1970                 if (ixgbe_allocate_receive_buffers(rxr)) {
1971                         device_printf(dev,
1972                             "Critical Failure setting up receive buffers\n");
1973                         error = ENOMEM;
1974                         goto err_rx_desc;
1975                 }
1976         }
1977
1978         return (0);
1979
1980 err_rx_desc:
1981         for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
1982                 ixgbe_dma_free(adapter, &rxr->rxdma);
1983 err_tx_desc:
1984         for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
1985                 ixgbe_dma_free(adapter, &txr->txdma);
1986         free(adapter->rx_rings, M_DEVBUF);
1987 rx_fail:
1988         free(adapter->tx_rings, M_DEVBUF);
1989 fail:
1990         return (error);
1991 }
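
/*
 * Note on the unwind above: txconf and rxconf count how many rings
 * were fully set up, so on failure each error label frees exactly
 * that many descriptor areas before the ring arrays themselves are
 * released (err_rx_desc falls through to err_tx_desc, then rx_fail).
 */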
1992
1993 /*********************************************************************
1994  *
1995  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1996  *  the information needed to transmit a packet on the wire. This is
1997  *  called only once at attach, setup is done every reset.
1998  *
1999  **********************************************************************/
2000 static int
2001 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2002 {
2003         struct adapter *adapter = txr->adapter;
2004         device_t dev = adapter->dev;
2005         struct ixgbe_tx_buf *txbuf;
2006         int error, i;
2007
2008         /*
2009          * Setup DMA descriptor areas.
2010          */
2011         if ((error = bus_dma_tag_create(NULL,           /* parent */
2012                                PAGE_SIZE, 0,            /* alignment, bounds */
2013                                BUS_SPACE_MAXADDR,       /* lowaddr */
2014                                BUS_SPACE_MAXADDR,       /* highaddr */
2015                                NULL, NULL,              /* filter, filterarg */
2016                                IXGBE_TSO_SIZE,          /* maxsize */
2017                                IXGBE_MAX_SCATTER,       /* nsegments */
2018                                PAGE_SIZE,               /* maxsegsize */
2019                                0,                       /* flags */
2020                                NULL,                    /* lockfunc */
2021                                NULL,                    /* lockfuncarg */
2022                                &txr->txtag))) {
2023                 device_printf(dev,"Unable to allocate TX DMA tag\n");
2024                 goto fail;
2025         }
2026
2027         if (!(txr->tx_buffers =
2028             (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
2029             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2030                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2031                 error = ENOMEM;
2032                 goto fail;
2033         }
2034
2035         /* Create the descriptor buffer dma maps */
2036         txbuf = txr->tx_buffers;
2037         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2038                 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2039                 if (error != 0) {
2040                         device_printf(dev, "Unable to create TX DMA map\n");
2041                         goto fail;
2042                 }
2043         }
2044
2045         return 0;
2046 fail:
2047         /* We free all; this handles the case where we failed partway */
2048         ixgbe_free_transmit_structures(adapter);
2049         return (error);
2050 }
2051
2052 /*********************************************************************
2053  *
2054  *  Initialize a transmit ring.
2055  *
2056  **********************************************************************/
2057 static void
2058 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2059 {
2060         struct adapter *adapter = txr->adapter;
2061         struct ixgbe_tx_buf *txbuf;
2062         int i;
2063
2064         /* Clear the old ring contents */
2065         bzero((void *)txr->tx_base,
2066               (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2067         /* Reset indices */
2068         txr->next_avail_tx_desc = 0;
2069         txr->next_tx_to_clean = 0;
2070
2071         /* Free any existing tx buffers. */
2072         txbuf = txr->tx_buffers;
2073         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2074                 if (txbuf->m_head != NULL) {
2075                         bus_dmamap_sync(txr->txtag, txbuf->map,
2076                             BUS_DMASYNC_POSTWRITE);
2077                         bus_dmamap_unload(txr->txtag, txbuf->map);
2078                         m_freem(txbuf->m_head);
2079                         txbuf->m_head = NULL;
2080                 }
2081                 /* clear the watch index */
2082                 txbuf->next_eop = -1;
2083         }
2084
2085         /* Set number of descriptors available */
2086         txr->tx_avail = adapter->num_tx_desc;
2087
2088         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2089             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2090
2091 }
2092
2093 /*********************************************************************
2094  *
2095  *  Initialize all transmit rings.
2096  *
2097  **********************************************************************/
2098 static int
2099 ixgbe_setup_transmit_structures(struct adapter *adapter)
2100 {
2101         struct tx_ring *txr = adapter->tx_rings;
2102
2103         for (int i = 0; i < adapter->num_tx_queues; i++, txr++)
2104                 ixgbe_setup_transmit_ring(txr);
2105
2106         return (0);
2107 }
2108
2109 /*********************************************************************
2110  *
2111  *  Enable transmit unit.
2112  *      NOTE: this will need to be changed if there is more than
2113  *      one transmit queue.
2114  **********************************************************************/
2115 static void
2116 ixgbe_initialize_transmit_units(struct adapter *adapter)
2117 {
2118         struct tx_ring *txr = adapter->tx_rings;
2119         uint64_t       tdba = txr->txdma.dma_paddr;
2120
2121         /* Setup the Base and Length of the Tx Descriptor Ring */
2122
2123         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
2124                        (tdba & 0x00000000ffffffffULL));
2125         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0), (tdba >> 32));
2126         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
2127                        adapter->num_tx_desc *
2128                        sizeof(struct ixgbe_legacy_tx_desc));
2129
2130         /* Setup the HW Tx Head and Tail descriptor pointers */
2131         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
2132         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
2133
2134         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
2135
2136         /* Setup Transmit Descriptor Cmd Settings */
2137         txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2138
2139         return;
2140 }
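
/*
 * For reference: each descriptor is 16 bytes, so the TDLEN written
 * above works out to e.g. 4096 (one page) for a 256-descriptor ring.
 */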
2141
2142 /*********************************************************************
2143  *
2144  *  Free all transmit rings.
2145  *
2146  **********************************************************************/
2147 static void
2148 ixgbe_free_transmit_structures(struct adapter *adapter)
2149 {
2150         struct tx_ring *txr = adapter->tx_rings;
2151         mtx_lock(&adapter->tx_mtx);
2152         for (int i = 0; i < adapter->num_tx_queues; i++, txr++) {
2153                 ixgbe_free_transmit_buffers(txr);
2154                 ixgbe_dma_free(adapter, &txr->txdma);
2155         }
2156         mtx_unlock(&adapter->tx_mtx);
2157         mtx_destroy(&adapter->tx_mtx);
2158         free(adapter->tx_rings, M_DEVBUF);
2159 }
2160
2161 /*********************************************************************
2162  *
2163  *  Free transmit ring related data structures.
2164  *
2165  **********************************************************************/
2166 static void
2167 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2168 {
2169         struct adapter *adapter = txr->adapter;
2170         struct ixgbe_tx_buf *tx_buffer;
2171         int             i;
2172
2173         INIT_DEBUGOUT("free_transmit_ring: begin");
2174
2175         if (txr->tx_buffers == NULL)
2176                 return;
2177
2178         tx_buffer = txr->tx_buffers;
2179         for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2180                 if (tx_buffer->m_head != NULL) {
2181                         bus_dmamap_sync(txr->txtag, tx_buffer->map,
2182                             BUS_DMASYNC_POSTWRITE);
2183                         bus_dmamap_unload(txr->txtag,
2184                             tx_buffer->map);
2185                         m_freem(tx_buffer->m_head);
2186                         tx_buffer->m_head = NULL;
2187                         if (tx_buffer->map != NULL) {
2188                                 bus_dmamap_destroy(txr->txtag,
2189                                     tx_buffer->map);
2190                                 tx_buffer->map = NULL;
2191                         }
2192                 } else if (tx_buffer->map != NULL) {
2193                         bus_dmamap_unload(txr->txtag,
2194                             tx_buffer->map);
2195                         bus_dmamap_destroy(txr->txtag,
2196                             tx_buffer->map);
2197                         tx_buffer->map = NULL;
2198                 }
2199         }
2200
2201         if (txr->tx_buffers != NULL) {
2202                 free(txr->tx_buffers, M_DEVBUF);
2203                 txr->tx_buffers = NULL;
2204         }
2205         if (txr->txtag != NULL) {
2206                 bus_dma_tag_destroy(txr->txtag);
2207                 txr->txtag = NULL;
2208         }
2209         return;
2210 }
2211
2212 /*********************************************************************
2213  *
2214  *  Advanced Context Descriptor setup for VLAN or CSUM
2215  *
2216  **********************************************************************/
2217
2218 static boolean_t
2219 ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp)
2220 {
2221         struct adapter *adapter = txr->adapter;
2222         struct ixgbe_adv_tx_context_desc *TXD;
2223         struct ixgbe_tx_buf        *tx_buffer;
2224         uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2225         struct ether_vlan_header *eh;
2226         struct ip *ip;
2227         struct ip6_hdr *ip6;
2228         int  ehdrlen, ip_hlen;
2229         u16     etype;
2230         u8      ipproto;
2231         int ctxd = txr->next_avail_tx_desc;
2232 #if __FreeBSD_version < 700000
2233         struct m_tag    *mtag;
2234 #else
2235         u16 vtag = 0;
2236 #endif
2237
2238
2239         tx_buffer = &txr->tx_buffers[ctxd];
2240         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2241
2242         /*
2243         ** In advanced descriptors the vlan tag must 
2244         ** be placed into the descriptor itself.
2245         */
2246 #if __FreeBSD_version < 700000
2247         mtag = VLAN_OUTPUT_TAG(ifp, mp);
2248         if (mtag != NULL)
2249                 vlan_macip_lens |=
2250                     htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT;
2251 #else
2252         if (mp->m_flags & M_VLANTAG) {
2253                 vtag = htole16(mp->m_pkthdr.ether_vtag);
2254                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2255         }
2256 #endif
2257         /*
2258          * Determine where frame payload starts.
2259          * Jump over vlan headers if already present,
2260          * helpful for QinQ too.
2261          */
2262         eh = mtod(mp, struct ether_vlan_header *);
2263         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2264                 etype = ntohs(eh->evl_proto);
2265                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2266         } else {
2267                 etype = ntohs(eh->evl_encap_proto);
2268                 ehdrlen = ETHER_HDR_LEN;
2269         }
2270
2271         /* Set the ether header length */
2272         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2273
2274         switch (etype) {
2275                 case ETHERTYPE_IP:
2276                         if (mp->m_len < ehdrlen + sizeof(struct ip))
2277                                 return FALSE; /* failure */
2278                         ip = (struct ip *)(mp->m_data + ehdrlen);
2279                         ip_hlen = ip->ip_hl << 2;
2280                         ipproto = ip->ip_p;
2281                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2282                         break;
2283                 case ETHERTYPE_IPV6:
2284                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2285                         ip_hlen = sizeof(struct ip6_hdr);
2286                         if (mp->m_len < ehdrlen + ip_hlen)
2287                                 return FALSE; /* failure */
2288                         ipproto = ip6->ip6_nxt;
2289                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2290                         break;
2291                 default:
2292                         return FALSE;
2293         }
2294
2295         vlan_macip_lens |= ip_hlen;
2296         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2297
2298         switch (ipproto) {
2299                 case IPPROTO_TCP:
2300                         if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2301                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2302                         break;
2303                 case IPPROTO_UDP:
2304                         if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2305                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2306                         break;
2307         }
2308
2309         /* Now copy bits into descriptor */
2310         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2311         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2312         TXD->seqnum_seed = htole32(0);
2313         TXD->mss_l4len_idx = htole32(0);
2314
2315 #ifndef NO_82598_A0_SUPPORT
2316         if (adapter->hw.revision_id == 0)
2317                 desc_flip(TXD);
2318 #endif
2319
2320         tx_buffer->m_head = NULL;
2321         tx_buffer->next_eop = -1;
2322
2323         /* We've consumed the first desc, adjust counters */
2324         if (++ctxd == adapter->num_tx_desc)
2325                 ctxd = 0;
2326         txr->next_avail_tx_desc = ctxd;
2327         --txr->tx_avail;
2328
2329         return TRUE;
2330 }
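
/*
 * For reference, vlan_macip_lens above packs three fields using the
 * IXGBE_ADVTXD_* shifts:
 *
 *      (vtag << VLAN_SHIFT) | (ehdrlen << MACLEN_SHIFT) | ip_hlen
 *
 * so, e.g., an untagged IPv4 frame with no options contributes
 * (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20.
 */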
2331
2332 #if __FreeBSD_version >= 700000
2333 /**********************************************************************
2334  *
2335  *  Setup work for hardware segmentation offload (TSO) on
2336  *  adapters using advanced tx descriptors
2337  *
2338  **********************************************************************/
2339 static boolean_t
2340 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2341 {
2342         struct adapter *adapter = txr->adapter;
2343         struct ixgbe_adv_tx_context_desc *TXD;
2344         struct ixgbe_tx_buf        *tx_buffer;
2345         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2346         u32 mss_l4len_idx = 0;
2347         u16 vtag = 0;
2348         int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
2349         struct ether_vlan_header *eh;
2350         struct ip *ip;
2351         struct tcphdr *th;
2352
2353         if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
2354             (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
2355                 return FALSE;
2356
2357         /*
2358          * Determine where frame payload starts.
2359          * Jump over vlan headers if already present
2360          */
2361         eh = mtod(mp, struct ether_vlan_header *);
2362         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 
2363                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2364         else
2365                 ehdrlen = ETHER_HDR_LEN;
2366
2367         /* Ensure we have at least the IP+TCP header in the first mbuf. */
2368         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2369                 return FALSE;
2370
2371         ctxd = txr->next_avail_tx_desc;
2372         tx_buffer = &txr->tx_buffers[ctxd];
2373         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2374
2375         ip = (struct ip *)(mp->m_data + ehdrlen);
2376         if (ip->ip_p != IPPROTO_TCP)
2377                 return FALSE;   /* 0 */
2378         ip->ip_len = 0;
2379         ip->ip_sum = 0;
2380         ip_hlen = ip->ip_hl << 2;
2381         th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2382         th->th_sum = in_pseudo(ip->ip_src.s_addr,
2383             ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2384         tcp_hlen = th->th_off << 2;
2385         hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2386         /* This is used in the transmit desc in encap */
2387         *paylen = mp->m_pkthdr.len - hdrlen;
2388
2389         /* VLAN MACLEN IPLEN */
2390         if (mp->m_flags & M_VLANTAG) {
2391                 vtag = htole16(mp->m_pkthdr.ether_vtag);
2392                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2393         }
2394
2395         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2396         vlan_macip_lens |= ip_hlen;
2397         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2398
2399         /* ADV DTYPE TUCMD */
2400         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2401         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2402         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2403         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2404
2405
2406         /* MSS L4LEN IDX */
2407         mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2408         mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2409         TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2410
2411         TXD->seqnum_seed = htole32(0);
2412         tx_buffer->m_head = NULL;
2413         tx_buffer->next_eop = -1;
2414
2415 #ifndef NO_82598_A0_SUPPORT
2416         if (adapter->hw.revision_id == 0)
2417                 desc_flip(TXD);
2418 #endif
2419
2420         if (++ctxd == adapter->num_tx_desc)
2421                 ctxd = 0;
2422
2423         txr->tx_avail--;
2424         txr->next_avail_tx_desc = ctxd;
2425         return TRUE;
2426 }
2427
2428 #else   /* For 6.2 RELEASE */
2429 /* This makes it easy to keep the code common */
2430 static boolean_t
2431 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2432 {
2433         return (FALSE);
2434 }
2435 #endif
2436
2437 /**********************************************************************
2438  *
2439  *  Examine each tx_buffer in the used queue. If the hardware is done
2440  *  processing the packet then free associated resources. The
2441  *  tx_buffer is put back on the free queue.
2442  *
2443  **********************************************************************/
2444 static boolean_t
2445 ixgbe_txeof(struct tx_ring *txr)
2446 {
2447         struct adapter * adapter = txr->adapter;
2448         struct ifnet    *ifp = adapter->ifp;
2449         int     first, last, done, num_avail;
2450         struct ixgbe_tx_buf *tx_buffer;
2451         struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2452
2453         mtx_assert(&adapter->tx_mtx, MA_OWNED);
2454
2455         if (txr->tx_avail == adapter->num_tx_desc)
2456                 return FALSE;
2457
2458         num_avail = txr->tx_avail;
2459         first = txr->next_tx_to_clean;
2460
2461         tx_buffer = &txr->tx_buffers[first];
2462         /* For cleanup we just use legacy struct */
2463         tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2464         last = tx_buffer->next_eop;
2465         if (last == -1)
2466                 return FALSE;
2467
2468         eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2469
2470         /*
2471          * What this does is get the index of the
2472          * first descriptor AFTER the EOP of the
2473          * first packet, that way we can do the
2474          * simple comparison on the inner while loop
2475          * below.
2476          */
2477         if (++last == adapter->num_tx_desc) last = 0;
2478         done = last;
2479
2480         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2481             BUS_DMASYNC_POSTREAD);
2482
2483         while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2484                 /* We clean the range of the packet */
2485                 while (first != done) {
2486                         tx_desc->upper.data = 0;
2487                         tx_desc->lower.data = 0;
2488                         tx_desc->buffer_addr = 0;
2489                         num_avail++;
2490
2491                         if (tx_buffer->m_head) {
2492                                 ifp->if_opackets++;
2493                                 bus_dmamap_sync(txr->txtag,
2494                                     tx_buffer->map,
2495                                     BUS_DMASYNC_POSTWRITE);
2496                                 bus_dmamap_unload(txr->txtag,
2497                                     tx_buffer->map);
2498                                 m_freem(tx_buffer->m_head);
2499                                 tx_buffer->m_head = NULL;
2500                                 tx_buffer->map = NULL;
2501                         }
2502                         tx_buffer->next_eop = -1;
2503
2504                         if (++first == adapter->num_tx_desc)
2505                                 first = 0;
2506
2507                         tx_buffer = &txr->tx_buffers[first];
2508                         tx_desc =
2509                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2510                 }
2511                 /* See if we can continue to the next packet */
2512                 last = tx_buffer->next_eop;
2513                 if (last != -1) {
2514                         eop_desc =
2515                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2516                         /* Get new done point */
2517                         if (++last == adapter->num_tx_desc) last = 0;
2518                         done = last;
2519                 } else
2520                         break;
2521
2522         }
2523         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2524             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2525
2526         txr->next_tx_to_clean = first;
2527
2528         /*
2529          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2530          * it is OK to send packets. If there are no pending descriptors,
2531          * clear the timeout. Otherwise, if some descriptors have been freed,
2532          * restart the timeout.
2533          */
2534         if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2535                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2536                 /* If all are clean turn off the timer */
2537                 if (num_avail == adapter->num_tx_desc)
2538                         adapter->watchdog_timer = 0;
2539                 /* Some were cleaned, so reset timer */
2540                 else if (num_avail != txr->tx_avail)
2541                         adapter->watchdog_timer = IXGBE_TX_TIMEOUT;
2542         }
2543
2544         txr->tx_avail = num_avail;
2545         return TRUE;
2546 }
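
/*
 * Worked example for the cleanup above: with num_tx_desc = 8 and a
 * packet occupying descriptors 2..4, next_eop is 4, so 'done' becomes
 * 5 and the inner loop frees 2, 3 and 4 before moving to the next
 * packet's EOP (or stopping if its DD bit is not yet set).
 */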
2547
2548 /*********************************************************************
2549  *
2550  *  Get a buffer from system mbuf buffer pool.
2551  *
2552  **********************************************************************/
2553 static int
2554 ixgbe_get_buf(struct rx_ring *rxr, int i)
2555 {
2556         struct adapter  *adapter = rxr->adapter;
2557         struct mbuf     *mp;
2558         bus_dmamap_t    map;
2559         int             nsegs, error, old, s = 0;
2560         int             size = MCLBYTES;
2561
2562
2563         bus_dma_segment_t       segs[1];
2564         struct ixgbe_rx_buf     *rxbuf;
2565
2566         /* Are we going to Jumbo clusters? */
2567         if (adapter->bigbufs) {
2568                 size = MJUMPAGESIZE;
2569                 s = 1;
2570         }
2571
2572         mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
2573         if (mp == NULL) {
2574                 adapter->mbuf_alloc_failed++;
2575                 return (ENOBUFS);
2576         }
2577
2578         mp->m_len = mp->m_pkthdr.len = size;
2579
2580         if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2581                 m_adj(mp, ETHER_ALIGN);
2582
2583         /*
2584          * Using memory from the mbuf cluster pool, invoke the bus_dma
2585          * machinery to arrange the memory mapping.
2586          */
2587         error = bus_dmamap_load_mbuf_sg(rxr->rxtag[s], rxr->spare_map[s],
2588             mp, segs, &nsegs, BUS_DMA_NOWAIT);
2589         if (error) {
2590                 m_free(mp);
2591                 return (error);
2592         }
2593
2594         /* Now check our target buffer for existing mapping */
2595         rxbuf = &rxr->rx_buffers[i];
2596         old = rxbuf->bigbuf;
2597         if (rxbuf->m_head != NULL)
2598                 bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
2599
2600         map = rxbuf->map[old];
2601         rxbuf->map[s] = rxr->spare_map[s];
2602         rxr->spare_map[old] = map;
2603         bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s], BUS_DMASYNC_PREREAD);
2604         rxbuf->m_head = mp;
2605         rxbuf->bigbuf = s;
2606
2607         rxr->rx_base[i].read.pkt_addr = htole64(segs[0].ds_addr);
2608
2609 #ifndef NO_82598_A0_SUPPORT
2610         /* A0 silicon needs One's Complement descriptors */
2611         if (adapter->hw.revision_id == 0) {
2612                 struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
2613                 struct dhack *d;   
2614
2615                 d = (struct dhack *)&rxr->rx_base[i];
2616                 d->a1 = ~(d->a1);
2617                 d->a2 = ~(d->a2);
2618         }
2619 #endif
2620
2621         return (0);
2622 }
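
/*
 * Note on the map juggling above: the fresh mbuf is loaded into the
 * ring's spare map first, so a failed load leaves the descriptor's
 * current mapping intact; on success the spare and per-buffer maps
 * are simply swapped.
 */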
2623
2624 /*********************************************************************
2625  *
2626  *  Allocate memory for rx_buffer structures. Since we use one
2627  *  rx_buffer per received packet, the maximum number of rx_buffer's
2628  *  that we'll need is equal to the number of receive descriptors
2629  *  that we've allocated.
2630  *
2631  **********************************************************************/
2632 static int
2633 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2634 {
2635         struct  adapter         *adapter = rxr->adapter;
2636         device_t                dev = adapter->dev;
2637         struct ixgbe_rx_buf     *rxbuf;
2638         int                     i, bsize, error;
2639
2640         bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
2641         if (!(rxr->rx_buffers =
2642             (struct ixgbe_rx_buf *) malloc(bsize,
2643             M_DEVBUF, M_NOWAIT | M_ZERO))) {
2644                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2645                 error = ENOMEM;
2646                 goto fail;
2647         }
2648
2649         /* First make the small (2K) tag/map */
2650         if ((error = bus_dma_tag_create(NULL,           /* parent */
2651                                    PAGE_SIZE, 0,        /* alignment, bounds */
2652                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2653                                    BUS_SPACE_MAXADDR,   /* highaddr */
2654                                    NULL, NULL,          /* filter, filterarg */
2655                                    MCLBYTES,            /* maxsize */
2656                                    1,                   /* nsegments */
2657                                    MCLBYTES,            /* maxsegsize */
2658                                    0,                   /* flags */
2659                                    NULL,                /* lockfunc */
2660                                    NULL,                /* lockfuncarg */
2661                                    &rxr->rxtag[0]))) {
2662                 device_printf(dev, "Unable to create RX Small DMA tag\n");
2663                 goto fail;
2664         }
2665
2666         /* Next make the large (4K) tag/map */
2667         if ((error = bus_dma_tag_create(NULL,           /* parent */
2668                                    PAGE_SIZE, 0,        /* alignment, bounds */
2669                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2670                                    BUS_SPACE_MAXADDR,   /* highaddr */
2671                                    NULL, NULL,          /* filter, filterarg */
2672                                    MJUMPAGESIZE,        /* maxsize */
2673                                    1,                   /* nsegments */
2674                                    MJUMPAGESIZE,        /* maxsegsize */
2675                                    0,                   /* flags */
2676                                    NULL,                /* lockfunc */
2677                                    NULL,                /* lockfuncarg */
2678                                    &rxr->rxtag[1]))) {
2679                 device_printf(dev, "Unable to create RX Large DMA tag\n");
2680                 goto fail;
2681         }
2682
2683         /* Create the spare maps (used by getbuf) */
2684         error = bus_dmamap_create(rxr->rxtag[0], BUS_DMA_NOWAIT,
2685              &rxr->spare_map[0]);
2686         if (error == 0)
2687                 error = bus_dmamap_create(rxr->rxtag[1], BUS_DMA_NOWAIT,
2688                      &rxr->spare_map[1]);
2689         if (error) {
2690                 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
2691                     __func__, error);
2692                 goto fail;
2693         }
2693
2694         for (i = 0; i < adapter->num_rx_desc; i++) {
2695                 rxbuf = &rxr->rx_buffers[i];
2696                 error = bus_dmamap_create(rxr->rxtag[0],
2697                     BUS_DMA_NOWAIT, &rxbuf->map[0]);
2698                 if (error) {
2699                         device_printf(dev, "Unable to create Small RX DMA map\n");
2700                         goto fail;
2701                 }
2702                 error = bus_dmamap_create(rxr->rxtag[1],
2703                     BUS_DMA_NOWAIT, &rxbuf->map[1]);
2704                 if (error) {
2705                         device_printf(dev, "Unable to create Large RX DMA map\n");
2706                         goto fail;
2707                 }
2708         }
2709
2710         return (0);
2711
2712 fail:
2713         /* Frees all, but can handle partial completion */
2714         ixgbe_free_receive_structures(adapter);
2715         return (error);
2716 }
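
/*
 * Note: each ring keeps two tag/map pairs, index 0 for standard 2K
 * clusters and index 1 for MJUMPAGESIZE clusters; a buffer's bigbuf
 * flag records which pair its current mbuf was loaded with.
 */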
2717
2718 /*********************************************************************
2719  *
2720  *  Initialize a receive ring and its buffers.
2721  *
2722  **********************************************************************/
2723 static int
2724 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2725 {
2726         struct  adapter         *adapter;
2727         struct ixgbe_rx_buf *rxbuf;
2728         int j, rsize, s;
2729
2730         adapter = rxr->adapter;
2731         rsize = roundup2(adapter->num_rx_desc *
2732             sizeof(union ixgbe_adv_rx_desc), 4096);
2733         /* Clear the ring contents */
2734         bzero((void *)rxr->rx_base, rsize);
2735
2736         /*
2737         ** Free any current RX buffers: the size of the
2738         ** loaded buffer is indicated by its
2739         ** bigbuf value.
2740         */
2741         for (int i = 0; i < adapter->num_rx_desc; i++) {
2742                 rxbuf = &rxr->rx_buffers[i];
2743                 s = rxbuf->bigbuf;
2744                 if (rxbuf->m_head != NULL) {
2745                         bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2746                             BUS_DMASYNC_POSTREAD);
2747                         bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2748                         m_freem(rxbuf->m_head);
2749                         rxbuf->m_head = NULL;
2750                 }
2751         }
2752
2753         for (j = 0; j < adapter->num_rx_desc; j++) {
2754                 if (ixgbe_get_buf(rxr, j) == ENOBUFS) {
2755                         rxr->rx_buffers[j].m_head = NULL;
2756                         rxr->rx_base[j].read.pkt_addr = 0;
2757                         /* If we fail, some buffers may have changed size */
2758                         s = adapter->bigbufs;
2759                         goto fail;
2760                 }
2761         }
2762
2763         /* Setup our descriptor indices */
2764         rxr->next_to_check = 0;
2765         rxr->last_cleaned = 0;
2766
2767         bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2768             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2769
2770         return (0);
2771 fail:
2772         /*
2773          * We need to clean up any buffers allocated so far
2774          * 'j' is the failing index, decrement it to get the
2775          * last success.
2776          */
2777         for (--j; j >= 0; j--) {
2778                 rxbuf = &rxr->rx_buffers[j];
2779                 if (rxbuf->m_head != NULL) {
2780                         bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2781                             BUS_DMASYNC_POSTREAD);
2782                         bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2783                         m_freem(rxbuf->m_head);
2784                         rxbuf->m_head = NULL;
2785                 }
2786         }
2787         return (ENOBUFS);
2788 }
2789
2790 /*********************************************************************
2791  *
2792  *  Initialize all receive rings.
2793  *
2794  **********************************************************************/
2795 static int
2796 ixgbe_setup_receive_structures(struct adapter *adapter)
2797 {
2798         struct rx_ring *rxr = adapter->rx_rings;
2799         int i, j, s;
2800
2801         for (i = 0; i < adapter->num_rx_queues; i++, rxr++)
2802                 if (ixgbe_setup_receive_ring(rxr))
2803                         goto fail;
2804
2805         return (0);
2806 fail:
2807         /*
2808          * Free RX buffers allocated so far, we will only handle
2809          * the rings that completed, the failing case will have
2810          * cleaned up for itself. The value of 'i' will be the
2811          * failed ring so we must pre-decrement it.
2812          */
2813         rxr = adapter->rx_rings;
2814         for (--i; i >= 0; i--, rxr++) {
2815                 for (j = 0; j < adapter->num_rx_desc; j++) {
2816                         struct ixgbe_rx_buf *rxbuf;
2817                         rxbuf = &rxr->rx_buffers[j];
2818                         s = rxbuf->bigbuf;
2819                         if (rxbuf->m_head != NULL) {
2820                                 bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2821                                   BUS_DMASYNC_POSTREAD);
2822                                 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2823                                 m_freem(rxbuf->m_head);
2824                                 rxbuf->m_head = NULL;
2825                         }
2826                 }
2827         }
2828
2829         return (ENOBUFS);
2830 }
2831
2832 /*********************************************************************
2833  *
2834  *  Enable receive unit.
2835  *
2836  **********************************************************************/
2837 static void
2838 ixgbe_initialize_receive_units(struct adapter *adapter)
2839 {
2840         struct  rx_ring *rxr = adapter->rx_rings;
2841         struct ifnet   *ifp = adapter->ifp;
2842         u32             rxctrl, fctrl, srrctl, rxcsum;
2843         u32             reta, mrqc, hlreg, linkvec;
2844         u32             random[10];
2845
2846
2847         /*
2848          * Make sure receives are disabled while
2849          * setting up the descriptor ring
2850          */
2851         rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
2852         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
2853             rxctrl & ~IXGBE_RXCTRL_RXEN);
2854
2855         /* Enable broadcasts */
2856         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2857         fctrl |= IXGBE_FCTRL_BAM;
2858         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2859
2860         hlreg = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
2861         if (ifp->if_mtu > ETHERMTU)
2862                 hlreg |= IXGBE_HLREG0_JUMBOEN;
2863         else
2864                 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2865         IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, hlreg);
2866
2867         srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
2868         srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2869         srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2870         if (adapter->bigbufs)
2871                 srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2872         else
2873                 srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2874         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2875         IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
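        /*
         * Illustrative arithmetic for the buffer sizing above (assuming
         * IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field is in 1 KB
         * units):
         *
         *      2048 >> 10 = 2  -> 2 KB buffers (standard clusters)
         *      4096 >> 10 = 4  -> 4 KB buffers (page-sized, for jumbo)
         */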
2876
2877         /* Set Queue moderation rate */
2878         for (int i = 0; i < IXGBE_MSGS; i++)
2879                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(i), DEFAULT_ITR);
2880
2881         /* Set a lower moderation rate on the link vector */
2882         linkvec = adapter->num_tx_queues + adapter->num_rx_queues;
2883         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(linkvec), LINK_ITR);
2884
2885         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
2886                 u64 rdba = rxr->rxdma.dma_paddr;
2887                 /* Setup the Base and Length of the Rx Descriptor Ring */
2888                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(i),
2889                                (rdba & 0x00000000ffffffffULL));
2890                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(i), (rdba >> 32));
2891                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(i),
2892                     adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2893
2894                 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2895                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(i), 0);
2896                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(i),
2897                     adapter->num_rx_desc - 1);
2898         }
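        /*
         * Note: rdba is the ring's 64-bit bus address, split across the
         * RDBAL (low 32 bits) and RDBAH (high 32 bits) registers; RDLEN
         * is the ring size in bytes (descriptor count times the 16-byte
         * advanced descriptor).
         */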
2899
2900         if (adapter->num_rx_queues > 1) {
2901                 /* set up random bits */
2902                 arc4rand(&random, sizeof(random), 0);
2903                 switch (adapter->num_rx_queues) {
2904                         case 8:
2905                         case 4:
2906                                 reta = 0x00010203;
2907                                 break;
2908                         case 2:
2909                                 reta = 0x00010001;
2910                                 break;
2911                         default:
2912                                 reta = 0x00000000;
2913                 }
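        /*
         * Illustrative note on the RETA values above (assuming the
         * 82598 layout of four one-byte queue indices per 32-bit
         * register, 128 entries across 32 registers): with four queues,
         * 0x00010203 spreads consecutive hash results over queues
         * 0 through 3; RSS only requires an even distribution, so the
         * byte order of the entries is immaterial.
         */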
2914
2915                 /* Set up the redirection table */
2916                 for (int i = 0; i < 32; i++) {
2917                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RETA(i), reta);
2918                         if (adapter->num_rx_queues > 4) {
2919                                 ++i;
2920                                 IXGBE_WRITE_REG(&adapter->hw,
2921                                     IXGBE_RETA(i), 0x04050607);
2922                         }
2923                 }
2924
2925                 /* Now fill our hash function seeds */
2926                 for (int i = 0; i < 10; i++)
2927                         IXGBE_WRITE_REG_ARRAY(&adapter->hw,
2928                             IXGBE_RSSRK(0), i, random[i]);
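        /*
         * Note: the ten 32-bit words written above form the 40-byte key
         * consumed by the Toeplitz-based RSS hash; a fresh random key
         * each boot is sufficient, it only needs to be unpredictable.
         */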
2929
2930                 mrqc = IXGBE_MRQC_RSSEN
2931                     /* Perform hash on these packet types */
2932                     | IXGBE_MRQC_RSS_FIELD_IPV4
2933                     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2934                     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2935                     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2936                     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2937                     | IXGBE_MRQC_RSS_FIELD_IPV6
2938                     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2939                     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2940                     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2941                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc);
2942
2943                 /* RSS and RX IPP Checksum are mutually exclusive */
2944                 rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
2945                 rxcsum |= IXGBE_RXCSUM_PCSD;
2946                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
2947         } else {
2948                 rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM);
2949                 if (ifp->if_capenable & IFCAP_RXCSUM)
2950                         rxcsum |= IXGBE_RXCSUM_IPPCSE;
2951                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum);
2952         }
2953
2954         /* Enable Receive engine */
2955         rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
2956         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl);
2957
2958         return;
2959 }
2960
2961 /*********************************************************************
2962  *
2963  *  Free all receive rings.
2964  *
2965  **********************************************************************/
2966 static void
2967 ixgbe_free_receive_structures(struct adapter *adapter)
2968 {
2969         struct rx_ring *rxr = adapter->rx_rings;
2970
2971         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
2972                 ixgbe_free_receive_buffers(rxr);
2973                 /* Free the ring memory as well */
2974                 ixgbe_dma_free(adapter, &rxr->rxdma);
2975         }
2976
2977         free(adapter->rx_rings, M_DEVBUF);
2978 }
2979
2980 /*********************************************************************
2981  *
2982  *  Free receive ring data structures
2983  *
2984  **********************************************************************/
2985 void
2986 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2987 {
2988         struct adapter          *adapter = NULL;
2989         struct ixgbe_rx_buf     *rxbuf = NULL;
2990
2991         INIT_DEBUGOUT("free_receive_buffers: begin");
2992         adapter = rxr->adapter;
2993         if (rxr->rx_buffers != NULL) {
2994                 rxbuf = &rxr->rx_buffers[0];
2995                 for (int i = 0; i < adapter->num_rx_desc; i++) {
2996                         int s = rxbuf->bigbuf;
2997                         if (rxbuf->map[s] != NULL) {
2998                                 bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2999                                 bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
3000                         }
3001                         if (rxbuf->m_head != NULL) {
3002                                 m_freem(rxbuf->m_head);
3003                         }
3004                         rxbuf->m_head = NULL;
3005                         ++rxbuf;
3006                 }
3007         }
3008         if (rxr->rx_buffers != NULL) {
3009                 free(rxr->rx_buffers, M_DEVBUF);
3010                 rxr->rx_buffers = NULL;
3011         }
3012         for (int s = 0; s < 2; s++) {
3013                 if (rxr->rxtag[s] != NULL) {
3014                         bus_dma_tag_destroy(rxr->rxtag[s]);
3015                         rxr->rxtag[s] = NULL;
3016                 }
3017         }
3018         return;
3019 }
3020
3021 /*********************************************************************
3022  *
3023  *  This routine executes in interrupt context. It replenishes
3024  *  the mbufs in the descriptor ring and passes frames that have
3025  *  been DMA'ed into host memory up to the network stack.
3026  *
3027  *  We loop at most count times if count is > 0, or until done if
3028  *  count < 0.
3029  *
3030  *********************************************************************/
3031 static int
3032 ixgbe_rxeof(struct rx_ring *rxr, int count)
3033 {
3034         struct adapter          *adapter = rxr->adapter;
3035         struct ifnet            *ifp = adapter->ifp;
3036         struct mbuf             *mp;
3037         int                     len, i, eop = 0;
3038         uint8_t                 accept_frame = 0;
3039         uint32_t                staterr;
3040         union ixgbe_adv_rx_desc *cur;
3041
3042
3043         i = rxr->next_to_check;
3044         cur = &rxr->rx_base[i];
3045         staterr = cur->wb.upper.status_error;
3046
3047         if (!(staterr & IXGBE_RXD_STAT_DD))
3048                 return (0);
3049
3050         while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
3051             (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3052                 struct mbuf *m = NULL;
3053                 int s;
3054
3055                 mp = rxr->rx_buffers[i].m_head;
3056                 s = rxr->rx_buffers[i].bigbuf;
3057                 bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
3058                                 BUS_DMASYNC_POSTREAD);
3059                 accept_frame = 1;
3060                 if (staterr & IXGBE_RXD_STAT_EOP) {
3061                         count--;
3062                         eop = 1;
3063                 } else {
3064                         eop = 0;
3065                 }
3066                 len = cur->wb.upper.length;
3067
3068                 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
3069                         accept_frame = 0;
3070
3071                 if (accept_frame) {
3072                         /* Get a fresh buffer first */
3073                         if (ixgbe_get_buf(rxr, i) != 0) {
3074                                 ifp->if_iqdrops++;
3075                                 goto discard;
3076                         }
3077
3078                         /* Assign correct length to the current fragment */
3079                         mp->m_len = len;
3080
3081                         if (rxr->fmp == NULL) {
3082                                 mp->m_pkthdr.len = len;
3083                                 rxr->fmp = mp; /* Store the first mbuf */
3084                                 rxr->lmp = mp;
3085                         } else {
3086                                 /* Chain mbuf's together */
3087                                 mp->m_flags &= ~M_PKTHDR;
3088                                 rxr->lmp->m_next = mp;
3089                                 rxr->lmp = rxr->lmp->m_next;
3090                                 rxr->fmp->m_pkthdr.len += len;
3091                         }
3092
3093                         if (eop) {
3094                                 rxr->fmp->m_pkthdr.rcvif = ifp;
3095                                 ifp->if_ipackets++;
3096                                 rxr->packet_count++;
3097                                 rxr->byte_count += rxr->fmp->m_pkthdr.len;
3098
3099                                 ixgbe_rx_checksum(adapter,
3100                                     staterr, rxr->fmp);
3101
3102                                 if (staterr & IXGBE_RXD_STAT_VP) {
3103 #if __FreeBSD_version < 700000
3104                                         VLAN_INPUT_TAG_NEW(ifp, rxr->fmp,
3105                                             (le16toh(cur->wb.upper.vlan) &
3106                                             IXGBE_RX_DESC_SPECIAL_VLAN_MASK));
3107 #else
3108                                         rxr->fmp->m_pkthdr.ether_vtag =
3109                                             le16toh(cur->wb.upper.vlan);
3110                                         rxr->fmp->m_flags |= M_VLANTAG;
3111 #endif
3112                                 }
3113                                 m = rxr->fmp;
3114                                 rxr->fmp = NULL;
3115                                 rxr->lmp = NULL;
3116                         }
3117                 } else {
3118                         ifp->if_ierrors++;
3119 discard:
3120                         /* Reuse loaded DMA map and just update mbuf chain */
3121                         mp = rxr->rx_buffers[i].m_head;
3122                         mp->m_len = mp->m_pkthdr.len =
3123                             (rxr->rx_buffers[i].bigbuf ? MJUMPAGESIZE:MCLBYTES);
3124                         mp->m_data = mp->m_ext.ext_buf;
3125                         mp->m_next = NULL;
3126                         if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
3127                                 m_adj(mp, ETHER_ALIGN);
3128                         if (rxr->fmp != NULL) {
3129                                 m_freem(rxr->fmp);
3130                                 rxr->fmp = NULL;
3131                                 rxr->lmp = NULL;
3132                         }
3133                         m = NULL;
3134                 }
3135
3136                 /* Zero out the receive descriptor's status */
3137                 cur->wb.upper.status_error = 0;
3138                 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3139                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3140
3141                 rxr->last_cleaned = i; /* for updating tail */
3142
3143                 if (++i == adapter->num_rx_desc)
3144                         i = 0;
3145
3146                 /* Now send up to the stack */
3147                 if (m != NULL) {
3148                         rxr->next_to_check = i;
3149                         (*ifp->if_input)(ifp, m);
3150                         i = rxr->next_to_check;
3151                 }
3152                 /* Get next descriptor */
3153                 cur = &rxr->rx_base[i];
3154                 staterr = cur->wb.upper.status_error;
3155         }
3156         rxr->next_to_check = i;
3157
3158         /* Advance the IXGBE receive queue "Tail Pointer" */
3159         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
3160
3161         if (!(staterr & IXGBE_RXD_STAT_DD))
3162                 return (0);
3163
3164         return (1);
3165 }
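/*
 * Note on the loop above: the hardware sets IXGBE_RXD_STAT_DD in the
 * writeback descriptor once a frame has landed in host memory, so
 * software can poll ownership without reading device registers.
 * Writing last_cleaned to RDT keeps the tail just behind the software
 * consumer, so the hardware never reuses a descriptor that has not yet
 * been replenished.
 */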
3166
3167 /*********************************************************************
3168  *
3169  *  Verify that the hardware indicated that the checksum is valid.
3170  *  Inform the stack of the checksum status so that it does not
3171  *  spend time re-verifying the checksum.
3172  *
3173  *********************************************************************/
3174 static void
3175 ixgbe_rx_checksum(struct adapter *adapter,
3176     uint32_t staterr, struct mbuf * mp)
3177 {
3178         uint16_t status = (uint16_t) staterr;
3179         uint8_t  errors = (uint8_t) (staterr >> 24);
3180
3181         /* Not offloaded */
3182         if (status & IXGBE_RXD_STAT_IXSM) {
3183                 mp->m_pkthdr.csum_flags = 0;
3184                 return;
3185         }
3186
3187         if (status & IXGBE_RXD_STAT_IPCS) {
3188                 /* Did it pass? */
3189                 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3190                         /* IP Checksum Good */
3191                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3192                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3193
3194                 } else
3195                         mp->m_pkthdr.csum_flags = 0;
3196         }
3197         if (status & IXGBE_RXD_STAT_L4CS) {
3198                 /* Did it pass? */
3199                 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3200                         mp->m_pkthdr.csum_flags |=
3201                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3202                         mp->m_pkthdr.csum_data = htons(0xffff);
3203                 } 
3204         }
3205         return;
3206 }
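/*
 * Note: setting CSUM_DATA_VALID | CSUM_PSEUDO_HDR together with
 * csum_data = 0xffff tells the TCP/UDP input path that the L4 checksum
 * (including the pseudo-header) has already been verified, so the
 * stack skips its own software checksum pass.
 */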
3207
3208
3209 static void
3210 ixgbe_enable_vlans(struct adapter *adapter)
3211 {
3212         uint32_t        ctrl;
3213
3214         ixgbe_disable_intr(adapter);
3215         ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
3216         ctrl |= IXGBE_VLNCTRL_VME;
3217         ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3218         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
3219         ixgbe_enable_intr(adapter);
3220
3221         return;
3222 }
3223
3224
3225 static void
3226 ixgbe_enable_intr(struct adapter *adapter)
3227 {
3228         u32 mask;
3229
3230         /* With MSIX, set up which interrupt causes to auto-clear */
3231         if (adapter->msix) {
3232                 mask = IXGBE_EIMS_ENABLE_MASK;
3233                 mask &= ~IXGBE_EIMS_OTHER;
3234                 mask &= ~IXGBE_EIMS_LSC;
3235                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
3236         }
3237
3238         mask = IXGBE_EIMS_ENABLE_MASK;
3239         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
3240         IXGBE_WRITE_FLUSH(&adapter->hw);
3241
3242         return;
3243 }
3244
3245 static void
3246 ixgbe_disable_intr(struct adapter *adapter)
3247 {
3248         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
3249         return;
3250 }
3251
3252 u16
3253 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
3254 {
3255         u16 value;
3256
3257         value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
3258             reg, 2);
3259
3260         return (value);
3261 }
3262
3263 static void
3264 ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector)
3265 {
3266         u32 ivar, index;
3267         vector |= IXGBE_IVAR_ALLOC_VAL;
3268         index = (entry >> 2) & 0x1F;
3269         ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
             ivar &= ~((u32)0xFF << (8 * (entry & 0x3))); /* clear the byte lane first */
3270         ivar |= (vector << (8 * (entry & 0x3)));
3271         IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
3272 }
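/*
 * Worked example of the IVAR mapping above: each 32-bit IVAR register
 * holds four 8-bit entries, one per interrupt cause.  For entry = 5
 * and vector = 2:
 *
 *      index     = (5 >> 2) & 0x1F = 1    -> IVAR(1)
 *      byte lane = 5 & 0x3 = 1            -> bits 15:8
 *      value     = vector | IXGBE_IVAR_ALLOC_VAL  (marks the entry valid)
 */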
3273
3274 static void
3275 ixgbe_configure_ivars(struct adapter *adapter)
3276 {
3277         int     i, vec;
3278
3279         for (i = 0, vec = 1; i < adapter->num_rx_queues; i++, vec++)
3280                 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), vec);
3281
3282         for (i = 0, vec = 8; i < adapter->num_tx_queues; i++, vec++)
3283                 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), vec);
3284
3285         /* For the Link interrupt */
3286         ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0);
3287 }
3288
3289 /**********************************************************************
3290  *
3291  *  Update the board statistics counters.
3292  *
3293  **********************************************************************/
3294 static void
3295 ixgbe_update_stats_counters(struct adapter *adapter)
3296 {
3297         struct ifnet   *ifp;
3298         struct ixgbe_hw *hw = &adapter->hw;
3299         u64  good_rx, missed_rx;
3300
3301         adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3302
3303         good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
3304         missed_rx  = IXGBE_READ_REG(hw, IXGBE_MPC(0));
3305         missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
3306         missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
3307         missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
3308         missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
3309         missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
3310         missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
3311         missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
3312
3313         adapter->stats.gprc += (good_rx - missed_rx);
3314
3315         adapter->stats.mpc[0] += missed_rx;
3316         adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3317         adapter->stats.bprc += IXGBE_READ_REG(hw, IXGBE_BPRC);
3318         adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3319         /*
3320          * Workaround: mprc hardware is incorrectly counting
3321          * broadcasts, so for now we subtract those.
3322          */
3323         adapter->stats.mprc -= adapter->stats.bprc;
3324         adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3325         adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3326         adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3327         adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3328         adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3329         adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3330         adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3331
3332         adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3333         adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3334         adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3335         adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3336         adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3337         adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3338         adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3339         adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3340         adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
3341         adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3342         adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3343         adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3346         adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3347         adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3348         adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3349         adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3350         adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3351         adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3352         adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3353         adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3354         adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3355
3356         ifp = adapter->ifp;
3357
3358         /* Fill out the OS statistics structure */
3359         ifp->if_ipackets = adapter->stats.gprc;
3360         ifp->if_opackets = adapter->stats.gptc;
3361         ifp->if_ibytes = adapter->stats.gorc;
3362         ifp->if_obytes = adapter->stats.gotc;
3363         ifp->if_imcasts = adapter->stats.mprc;
3364         ifp->if_collisions = 0;
3365
3366         /* Rx Errors */
3367         ifp->if_ierrors =
3368                 adapter->stats.mpc[0] +
3369                 adapter->stats.crcerrs +
3370                 adapter->stats.rlec;
3371
3372 }
3373
3374
3375 /**********************************************************************
3376  *
3377  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3378  *  This routine provides a way to take a look at important statistics
3379  *  maintained by the driver and hardware.
3380  *
3381  **********************************************************************/
3382 static void
3383 ixgbe_print_hw_stats(struct adapter * adapter)
3384 {
3385         device_t dev = adapter->dev;
3386
3387
3388         device_printf(dev,"Tx Descriptors not Avail1 = %lu\n",
3389                adapter->no_tx_desc_avail1);
3390         device_printf(dev,"Tx Descriptors not Avail2 = %lu\n",
3391                adapter->no_tx_desc_avail2);
3392         device_printf(dev,"Std Mbuf Failed = %lu\n",
3393                adapter->mbuf_alloc_failed);
3394         device_printf(dev,"Std Cluster Failed = %lu\n",
3395                adapter->mbuf_cluster_failed);
3396
3397         device_printf(dev,"Missed Packets = %llu\n",
3398                (long long)adapter->stats.mpc[0]);
3399         device_printf(dev,"Receive length errors = %llu\n",
3400                ((long long)adapter->stats.roc +
3401                (long long)adapter->stats.ruc));
3402         device_printf(dev,"Crc errors = %llu\n",
3403                (long long)adapter->stats.crcerrs);
3404         device_printf(dev,"Driver dropped packets = %lu\n",
3405                adapter->dropped_pkts);
3406
3407         device_printf(dev,"XON Rcvd = %llu\n",
3408                (long long)adapter->stats.lxonrxc);
3409         device_printf(dev,"XON Xmtd = %llu\n",
3410                (long long)adapter->stats.lxontxc);
3411         device_printf(dev,"XOFF Rcvd = %llu\n",
3412                (long long)adapter->stats.lxoffrxc);
3413         device_printf(dev,"XOFF Xmtd = %llu\n",
3414                (long long)adapter->stats.lxofftxc);
3415
3416         device_printf(dev,"Total Packets Rcvd = %llu\n",
3417                (long long)adapter->stats.tpr);
3418         device_printf(dev,"Good Packets Rcvd = %llu\n",
3419                (long long)adapter->stats.gprc);
3420         device_printf(dev,"Good Packets Xmtd = %llu\n",
3421                (long long)adapter->stats.gptc);
3422         device_printf(dev,"TSO Transmissions = %lu\n",
3423                adapter->tso_tx);
3424
3425         return;
3426 }
3427
3428 /**********************************************************************
3429  *
3430  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3431  *  This routine provides a way to take a look at important statistics
3432  *  maintained by the driver and hardware.
3433  *
3434  **********************************************************************/
3435 static void
3436 ixgbe_print_debug_info(struct adapter *adapter)
3437 {
3438         device_t dev = adapter->dev;
3439         struct rx_ring *rxr = adapter->rx_rings;
3440         struct ixgbe_hw *hw = &adapter->hw;
3441         uint8_t *hw_addr = adapter->hw.hw_addr;
3442  
3443         device_printf(dev,"Adapter hardware address = %p \n", hw_addr);
3444         device_printf(dev,"TXDCTL(0) = 0x%x RXCTRL = 0x%x \n",
3445             IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)),
3446             IXGBE_READ_REG(hw, IXGBE_RXCTRL)); 
3447         device_printf(dev,"RXDCTL(0) = 0x%x RXDCTL(1) = 0x%x"
3448             " RXDCTL(2) = 0x%x \n",
3449             IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)),
3450             IXGBE_READ_REG(hw, IXGBE_RXDCTL(1)),
3451             IXGBE_READ_REG(hw, IXGBE_RXDCTL(2)));
3452         device_printf(dev,"SRRCTL(0) = 0x%x SRRCTL(1) = 0x%x"
3453             " SRRCTL(2) = 0x%x \n",
3454             IXGBE_READ_REG(hw, IXGBE_SRRCTL(0)),
3455             IXGBE_READ_REG(hw, IXGBE_SRRCTL(1)),
3456             IXGBE_READ_REG(hw, IXGBE_SRRCTL(2)));
3457         device_printf(dev,"EIMC = 0x%x EIMS = 0x%x\n",
3458             IXGBE_READ_REG(hw, IXGBE_EIMC),
3459             IXGBE_READ_REG(hw, IXGBE_EIMS));
3460         device_printf(dev,"Queue(0) tdh = %d, hw tdt = %d\n",
3461             IXGBE_READ_REG(hw, IXGBE_TDH(0)),
3462             IXGBE_READ_REG(hw, IXGBE_TDT(0)));
3463         device_printf(dev,"Error Byte Count = %u \n",
3464             IXGBE_READ_REG(hw, IXGBE_ERRBC));
3465
3466         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3467                 device_printf(dev,"Queue %d Packets Received: %lu\n",
3468                     rxr->me, (long)rxr->packet_count);
3469         }
3470
3471         rxr = adapter->rx_rings; /* reset to the first ring */
3472         for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) {
3473                 device_printf(dev,"Queue %d Bytes Received: %lu\n",
3474                     rxr->me, (long)rxr->byte_count);
3475         }
3476
3477         for (int i = 0; i < adapter->num_rx_queues; i++) {
3478                 device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n",
3479                     i, IXGBE_READ_REG(hw, IXGBE_RDH(i)),
3480                     IXGBE_READ_REG(hw, IXGBE_RDT(i)));
3481         }
3482
3483         return;
3484 }
3485
3486 static int
3487 ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS)
3488 {
3489         int             error;
3490         int             result;
3491         struct adapter *adapter;
3492
3493         result = -1;
3494         error = sysctl_handle_int(oidp, &result, 0, req);
3495
3496         if (error || !req->newptr)
3497                 return (error);
3498
3499         if (result == 1) {
3500                 adapter = (struct adapter *) arg1;
3501                 ixgbe_print_hw_stats(adapter);
3502         }
3503         return (error);
3504 }
3505
3506 static int
3507 ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS)
3508 {
3509         int error, result;
3510         struct adapter *adapter;
3511
3512         result = -1;
3513         error = sysctl_handle_int(oidp, &result, 0, req);
3514
3515         if (error || !req->newptr)
3516                 return (error);
3517
3518         if (result == 1) {
3519                 adapter = (struct adapter *) arg1;
3520                 ixgbe_print_debug_info(adapter);
3521         }
3522         return (error);
3523 }
3524
3525 /*
3526 ** Set flow control using sysctl:
3527 ** Flow control values:
3528 **      0 - off
3529 **      1 - rx pause
3530 **      2 - tx pause
3531 **      3 - full
3532 */
3533 static int
3534 ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
3535 {
3536         int error;
3537         struct adapter *adapter;
3538
3539         error = sysctl_handle_int(oidp, &ixgbe_flow_control, 0, req);
3540
3541         if (error)
3542                 return (error);
3543
3544         adapter = (struct adapter *) arg1;
3545         switch (ixgbe_flow_control) {
3546                 case ixgbe_fc_rx_pause:
3547                 case ixgbe_fc_tx_pause:
3548                 case ixgbe_fc_full:
3549                         adapter->hw.fc.original_type = ixgbe_flow_control;
3550                         break;
3551                 case ixgbe_fc_none:
3552                 default:
3553                         adapter->hw.fc.original_type = ixgbe_fc_none;
3554         }
3555
3556         ixgbe_setup_fc(&adapter->hw, 0);
3557         return (error);
3558 }
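/*
 * Example usage (a sketch; the sysctl name and location are
 * assumptions, since the node is attached elsewhere in the driver):
 *
 *      # sysctl dev.ix.0.flow_control=3        full flow control
 */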
3559
3560 static void
3561 ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
3562         const char *description, int *limit, int value)
3563 {
3564         *limit = value;
3565         SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3566             SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3567             OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3568 }
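/*
 * Example caller (a sketch; the exact node name and default value are
 * assumptions):
 *
 *      ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
 *          "max rx packets to process per call",
 *          &adapter->rx_process_limit, 100);
 *
 * The value then appears as a read/write integer under the device's
 * sysctl tree, e.g. dev.ix.0.rx_processing_limit.
 */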
3569
3570 #ifndef NO_82598_A0_SUPPORT
3571 /*
3572  * A0 Workaround: invert descriptor for hardware
3573  */
3574 void
3575 desc_flip(void *desc)
3576 {
3577         struct dhack {u32 a1; u32 a2; u32 b1; u32 b2;};
3578         struct dhack *d;
3579
3580         d = (struct dhack *)desc;
3581         d->a1 = ~(d->a1);
3582         d->a2 = ~(d->a2);
3583         d->b1 = ~(d->b1);
3584         d->b2 = ~(d->b2);
3585         d->b2 &= 0xFFFFFFF0;
3586         d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
3587 }
3588 #endif
3589
3590
3591