]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/ixgb/if_ixgb.c
MFV r306669:
[FreeBSD/FreeBSD.git] / sys / dev / ixgb / if_ixgb.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2004, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD$*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/ixgb/if_ixgb.h>
41
42 /*********************************************************************
43  *  Set this to one to display debug statistics
44  *********************************************************************/
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgb_display_debug_stats = 0;

/*********************************************************************
 *  Linked list of board private structures for all NICs found
 *********************************************************************/

/* Head of a doubly linked list; entries are added in attach, removed in detach. */
struct adapter *ixgb_adapter_list = NULL;



/*********************************************************************
 *  Driver version
 *********************************************************************/

char            ixgb_driver_version[] = "1.0.6";
char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgb_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgb_vendor_info_t ixgb_vendor_info_array[] =
{
        /* Intel(R) PRO/10000 Network Connection */
        {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
        {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char    *ixgb_strings[] = {
        "Intel(R) PRO/10GbE Network Driver"
};
88
89 /*********************************************************************
90  *  Function prototypes
91  *********************************************************************/
/* Device interface entry points (newbus). */
static int      ixgb_probe(device_t);
static int      ixgb_attach(device_t);
static int      ixgb_detach(device_t);
static int      ixgb_shutdown(device_t);
/* Interrupt / data path. */
static void     ixgb_intr(void *);
static void     ixgb_start(struct ifnet *);
static void     ixgb_start_locked(struct ifnet *);
static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
static uint64_t ixgb_get_counter(struct ifnet *, ift_counter);
static void     ixgb_watchdog(struct adapter *);
static void     ixgb_init(void *);
static void     ixgb_init_locked(struct adapter *);
static void     ixgb_stop(void *);
static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgb_media_change(struct ifnet *);
/* Hardware bring-up / resource management helpers. */
static void     ixgb_identify_hardware(struct adapter *);
static int      ixgb_allocate_pci_resources(struct adapter *);
static void     ixgb_free_pci_resources(struct adapter *);
static void     ixgb_local_timer(void *);
static int      ixgb_hardware_init(struct adapter *);
static int      ixgb_setup_interface(device_t, struct adapter *);
static int      ixgb_setup_transmit_structures(struct adapter *);
static void     ixgb_initialize_transmit_unit(struct adapter *);
static int      ixgb_setup_receive_structures(struct adapter *);
static void     ixgb_initialize_receive_unit(struct adapter *);
static void     ixgb_enable_intr(struct adapter *);
static void     ixgb_disable_intr(struct adapter *);
static void     ixgb_free_transmit_structures(struct adapter *);
static void     ixgb_free_receive_structures(struct adapter *);
static void     ixgb_update_stats_counters(struct adapter *);
static void     ixgb_clean_transmit_interrupts(struct adapter *);
static int      ixgb_allocate_receive_structures(struct adapter *);
static int      ixgb_allocate_transmit_structures(struct adapter *);
static int      ixgb_process_receive_interrupts(struct adapter *, int);
static void 
ixgb_receive_checksum(struct adapter *,
                      struct ixgb_rx_desc * rx_desc,
                      struct mbuf *);
static void 
ixgb_transmit_checksum_setup(struct adapter *,
                             struct mbuf *,
                             u_int8_t *);
static void     ixgb_set_promisc(struct adapter *);
static void     ixgb_disable_promisc(struct adapter *);
static void     ixgb_set_multi(struct adapter *);
static void     ixgb_print_hw_stats(struct adapter *);
static void     ixgb_print_link_status(struct adapter *);
static int 
ixgb_get_buf(int i, struct adapter *,
             struct mbuf *);
static void     ixgb_enable_vlans(struct adapter * adapter);
static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int 
ixgb_dma_malloc(struct adapter *, bus_size_t,
                struct ixgb_dma_alloc *, int);
static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
#ifdef DEVICE_POLLING
static poll_handler_t ixgb_poll;
#endif
152
153 /*********************************************************************
154  *  FreeBSD Device Interface Entry Points
155  *********************************************************************/
156
static device_method_t ixgb_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixgb_probe),
        DEVMETHOD(device_attach, ixgb_attach),
        DEVMETHOD(device_detach, ixgb_detach),
        DEVMETHOD(device_shutdown, ixgb_shutdown),

        DEVMETHOD_END
};

static driver_t ixgb_driver = {
        "ixgb", ixgb_methods, sizeof(struct adapter),
};

static devclass_t ixgb_devclass;
DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);

MODULE_DEPEND(ixgb, pci, 1, 1, 1);
MODULE_DEPEND(ixgb, ether, 1, 1, 1);

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128      /* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 16       /* chip will only prefetch if tail is
                                         * pushed this many descriptors from
                                         * head */
#define RXDCTL_WTHRESH_DEFAULT 0        /* chip writes back at this many or RXT0 */
184
185 /*********************************************************************
186  *  Device identification routine
187  *
188  *  ixgb_probe determines if the driver should be loaded on
189  *  adapter based on PCI vendor/device id of the adapter.
190  *
191  *  return 0 on success, positive on failure
192  *********************************************************************/
193
194 static int
195 ixgb_probe(device_t dev)
196 {
197         ixgb_vendor_info_t *ent;
198
199         u_int16_t       pci_vendor_id = 0;
200         u_int16_t       pci_device_id = 0;
201         u_int16_t       pci_subvendor_id = 0;
202         u_int16_t       pci_subdevice_id = 0;
203         char            adapter_name[60];
204
205         INIT_DEBUGOUT("ixgb_probe: begin");
206
207         pci_vendor_id = pci_get_vendor(dev);
208         if (pci_vendor_id != IXGB_VENDOR_ID)
209                 return (ENXIO);
210
211         pci_device_id = pci_get_device(dev);
212         pci_subvendor_id = pci_get_subvendor(dev);
213         pci_subdevice_id = pci_get_subdevice(dev);
214
215         ent = ixgb_vendor_info_array;
216         while (ent->vendor_id != 0) {
217                 if ((pci_vendor_id == ent->vendor_id) &&
218                     (pci_device_id == ent->device_id) &&
219
220                     ((pci_subvendor_id == ent->subvendor_id) ||
221                      (ent->subvendor_id == PCI_ANY_ID)) &&
222
223                     ((pci_subdevice_id == ent->subdevice_id) ||
224                      (ent->subdevice_id == PCI_ANY_ID))) {
225                         sprintf(adapter_name, "%s, Version - %s",
226                                 ixgb_strings[ent->index],
227                                 ixgb_driver_version);
228                         device_set_desc_copy(dev, adapter_name);
229                         return (BUS_PROBE_DEFAULT);
230                 }
231                 ent++;
232         }
233
234         return (ENXIO);
235 }
236
237 /*********************************************************************
238  *  Device initialization routine
239  *
240  *  The attach entry point is called when the driver is being loaded.
241  *  This routine identifies the type of hardware, allocates all resources
242  *  and initializes the hardware.
243  *
244  *  return 0 on success, positive on failure
245  *********************************************************************/
246
static int
ixgb_attach(device_t dev)
{
        struct adapter *adapter;
        int             tsize, rsize;
        int             error = 0;

        device_printf(dev, "%s\n", ixgb_copyright);
        INIT_DEBUGOUT("ixgb_attach: begin");

        /* Allocate, clear, and link in our adapter structure */
        if (!(adapter = device_get_softc(dev))) {
                device_printf(dev, "adapter structure allocation failed\n");
                return (ENOMEM);
        }
        bzero(adapter, sizeof(struct adapter));
        adapter->dev = dev;
        adapter->osdep.dev = dev;
        IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));

        /* Insert at the head of the driver-global adapter list. */
        if (ixgb_adapter_list != NULL)
                ixgb_adapter_list->prev = adapter;
        adapter->next = ixgb_adapter_list;
        ixgb_adapter_list = adapter;

        /* SYSCTL APIs: expose a "stats" knob under the device's sysctl tree. */
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
                        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
                        OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
                        (void *)adapter, 0,
                        ixgb_sysctl_stats, "I", "Statistics");

        /* Timer callout fires with the adapter mutex held. */
        callout_init_mtx(&adapter->timer, &adapter->mtx, 0);

        /* Determine hardware revision */
        ixgb_identify_hardware(adapter);

        /* Parameters (to be read from user) */
        adapter->num_tx_desc = IXGB_MAX_TXD;
        adapter->num_rx_desc = IXGB_MAX_RXD;
        adapter->tx_int_delay = TIDV;
        adapter->rx_int_delay = RDTR;
        adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

        /* Flow-control defaults. */
        adapter->hw.fc.high_water = FCRTH;
        adapter->hw.fc.low_water = FCRTL;
        adapter->hw.fc.pause_time = FCPAUSE;
        adapter->hw.fc.send_xon = TRUE;
        adapter->hw.fc.type = FLOW_CONTROL;


        /* Set the max frame size assuming standard ethernet sized frames */
        adapter->hw.max_frame_size =
                ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

        if (ixgb_allocate_pci_resources(adapter)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_pci;
        }
        /* Descriptor rings must be a multiple of 4K in size. */
        tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
                             sizeof(struct ixgb_tx_desc), 4096);

        /* Allocate Transmit Descriptor ring */
        if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
                device_printf(dev, "Unable to allocate TxDescriptor memory\n");
                error = ENOMEM;
                goto err_tx_desc;
        }
        adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;

        rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
                             sizeof(struct ixgb_rx_desc), 4096);

        /* Allocate Receive Descriptor ring */
        if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
                device_printf(dev, "Unable to allocate rx_desc memory\n");
                error = ENOMEM;
                goto err_rx_desc;
        }
        adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;

        /* Allocate multicast array memory. */
        adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
            MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
        if (adapter->mta == NULL) {
                device_printf(dev, "Can not allocate multicast setup array\n");
                error = ENOMEM;
                goto err_hw_init;
        }

        /* Initialize the hardware */
        if (ixgb_hardware_init(adapter)) {
                device_printf(dev, "Unable to initialize the hardware\n");
                error = EIO;
                goto err_hw_init;
        }
        /* Setup OS specific network interface */
        if (ixgb_setup_interface(dev, adapter) != 0)
                goto err_hw_init;

        /* Initialize statistics */
        ixgb_clear_hw_cntrs(&adapter->hw);
        ixgb_update_stats_counters(adapter);

        INIT_DEBUGOUT("ixgb_attach: end");
        return (0);

        /* Unwind in reverse order of acquisition; free(NULL) is a no-op. */
err_hw_init:
        ixgb_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
        ixgb_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
        if (adapter->ifp != NULL)
                if_free(adapter->ifp);
        ixgb_free_pci_resources(adapter);
        /*
         * NOTE(review): adapter->sysctl_ctx is never initialized in this
         * function (the SYSCTL_ADD_PROC above uses the device's own
         * context) -- confirm this sysctl_ctx_free() is safe/needed.
         */
        sysctl_ctx_free(&adapter->sysctl_ctx);
        free(adapter->mta, M_DEVBUF);
        return (error);

}
369
370 /*********************************************************************
371  *  Device removal routine
372  *
373  *  The detach entry point is called when the driver is being removed.
374  *  This routine stops the adapter and deallocates all the resources
375  *  that were allocated for driver operation.
376  *
377  *  return 0 on success, positive on failure
378  *********************************************************************/
379
static int
ixgb_detach(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        struct ifnet   *ifp = adapter->ifp;

        INIT_DEBUGOUT("ixgb_detach: begin");

#ifdef DEVICE_POLLING
        /* Deregister polling before the interface goes away. */
        if (ifp->if_capenable & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif

        /*
         * Flag in_detach so the ioctl path bails out, then stop the
         * hardware under the adapter lock.
         */
        IXGB_LOCK(adapter);
        adapter->in_detach = 1;

        ixgb_stop(adapter);
        IXGB_UNLOCK(adapter);

#if __FreeBSD_version < 500000
        ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
#else
        ether_ifdetach(ifp);
#endif
        /* Wait for the local timer to finish before tearing down resources. */
        callout_drain(&adapter->timer);
        ixgb_free_pci_resources(adapter);
#if __FreeBSD_version >= 500000
        if_free(ifp);
#endif

        /* Free Transmit Descriptor ring */
        if (adapter->tx_desc_base) {
                ixgb_dma_free(adapter, &adapter->txdma);
                adapter->tx_desc_base = NULL;
        }
        /* Free Receive Descriptor ring */
        if (adapter->rx_desc_base) {
                ixgb_dma_free(adapter, &adapter->rxdma);
                adapter->rx_desc_base = NULL;
        }
        /* Remove from the adapter list */
        if (ixgb_adapter_list == adapter)
                ixgb_adapter_list = adapter->next;
        if (adapter->next != NULL)
                adapter->next->prev = adapter->prev;
        if (adapter->prev != NULL)
                adapter->prev->next = adapter->next;
        free(adapter->mta, M_DEVBUF);

        IXGB_LOCK_DESTROY(adapter);
        return (0);
}
432
433 /*********************************************************************
434  *
435  *  Shutdown entry point
436  *
437  **********************************************************************/
438
439 static int
440 ixgb_shutdown(device_t dev)
441 {
442         struct adapter *adapter = device_get_softc(dev);
443         IXGB_LOCK(adapter);
444         ixgb_stop(adapter);
445         IXGB_UNLOCK(adapter);
446         return (0);
447 }
448
449
450 /*********************************************************************
451  *  Transmit entry point
452  *
453  *  ixgb_start is called by the stack to initiate a transmit.
454  *  The driver will remain in this routine as long as there are
455  *  packets to transmit and transmit resources are available.
456  *  In case resources are not available stack is notified and
457  *  the packet is requeued.
458  **********************************************************************/
459
460 static void
461 ixgb_start_locked(struct ifnet * ifp)
462 {
463         struct mbuf    *m_head;
464         struct adapter *adapter = ifp->if_softc;
465
466         IXGB_LOCK_ASSERT(adapter);
467
468         if (!adapter->link_active)
469                 return;
470
471         while (ifp->if_snd.ifq_head != NULL) {
472                 IF_DEQUEUE(&ifp->if_snd, m_head);
473
474                 if (m_head == NULL)
475                         break;
476
477                 if (ixgb_encap(adapter, m_head)) {
478                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
479                         IF_PREPEND(&ifp->if_snd, m_head);
480                         break;
481                 }
482                 /* Send a copy of the frame to the BPF listener */
483 #if __FreeBSD_version < 500000
484                 if (ifp->if_bpf)
485                         bpf_mtap(ifp, m_head);
486 #else
487                 ETHER_BPF_MTAP(ifp, m_head);
488 #endif
489                 /* Set timeout in case hardware has problems transmitting */
490                 adapter->tx_timer = IXGB_TX_TIMEOUT;
491
492         }
493         return;
494 }
495
496 static void
497 ixgb_start(struct ifnet *ifp)
498 {
499         struct adapter *adapter = ifp->if_softc;
500
501         IXGB_LOCK(adapter);
502         ixgb_start_locked(ifp);
503         IXGB_UNLOCK(adapter);
504         return;
505 }
506
507 /*********************************************************************
508  *  Ioctl entry point
509  *
510  *  ixgb_ioctl is called when the user wants to configure the
511  *  interface.
512  *
513  *  return 0 on success, positive on failure
514  **********************************************************************/
515
static int
ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
        int             mask, error = 0;
        struct ifreq   *ifr = (struct ifreq *) data;
        struct adapter *adapter = ifp->if_softc;

        /* Refuse all configuration once detach has started. */
        if (adapter->in_detach)
                goto out;

        switch (command) {
        case SIOCSIFADDR:
        case SIOCGIFADDR:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
                ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
                /*
                 * NOTE(review): only the upper bound is validated here; a
                 * too-small (or negative) ifr_mtu is accepted -- confirm
                 * whether a lower-bound check is required.
                 */
                if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
                        error = EINVAL;
                } else {
                        IXGB_LOCK(adapter);
                        ifp->if_mtu = ifr->ifr_mtu;
                        adapter->hw.max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

                        /* Re-init so the hardware picks up the new frame size. */
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixgb_init_locked(adapter);
                        IXGB_UNLOCK(adapter);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
                IXGB_LOCK(adapter);
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                ixgb_init_locked(adapter);
                        }
                        /* Re-sync promiscuous state with the current flags. */
                        ixgb_disable_promisc(adapter);
                        ixgb_set_promisc(adapter);
                } else {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                ixgb_stop(adapter);
                        }
                }
                IXGB_UNLOCK(adapter);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
                /* Reprogram the multicast filter with interrupts masked. */
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXGB_LOCK(adapter);
                        ixgb_disable_intr(adapter);
                        ixgb_set_multi(adapter);
                        ixgb_enable_intr(adapter);
                        IXGB_UNLOCK(adapter);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
                IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
                /* mask = capabilities the caller wants toggled. */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
                if (mask & IFCAP_POLLING) {
                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
                                error = ether_poll_register(ixgb_poll, ifp);
                                if (error)
                                        return(error);
                                IXGB_LOCK(adapter);
                                ixgb_disable_intr(adapter);
                                ifp->if_capenable |= IFCAP_POLLING;
                                IXGB_UNLOCK(adapter);
                        } else {
                                error = ether_poll_deregister(ifp);
                                /* Enable interrupt even in error case */
                                IXGB_LOCK(adapter);
                                ixgb_enable_intr(adapter);
                                ifp->if_capenable &= ~IFCAP_POLLING;
                                IXGB_UNLOCK(adapter);
                        }
                }
#endif /* DEVICE_POLLING */
                /* NOTE(review): toggles TX+RX checksum offload as one unit. */
                if (mask & IFCAP_HWCSUM) {
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_capenable &= ~IFCAP_HWCSUM;
                        else
                                ifp->if_capenable |= IFCAP_HWCSUM;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                ixgb_init(adapter);
                }
                break;
        default:
                IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
                error = EINVAL;
        }

out:
        return (error);
}
619
620 /*********************************************************************
621  *  Watchdog entry point
622  *
623  *  This routine is called whenever hardware quits transmitting.
624  *
625  **********************************************************************/
626
627 static void
628 ixgb_watchdog(struct adapter *adapter)
629 {
630         struct ifnet *ifp;
631
632         ifp = adapter->ifp;
633
634         /*
635          * If we are in this routine because of pause frames, then don't
636          * reset the hardware.
637          */
638         if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
639                 adapter->tx_timer = IXGB_TX_TIMEOUT;
640                 return;
641         }
642         if_printf(ifp, "watchdog timeout -- resetting\n");
643
644         ixgb_stop(adapter);
645         ixgb_init_locked(adapter);
646
647
648         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
649
650         return;
651 }
652
653 /*********************************************************************
654  *  Init entry point
655  *
656  *  This routine is used in two ways. It is used by the stack as
657  *  init entry point in network interface structure. It is also used
658  *  by the driver as a hw/sw initialization routine to get to a
659  *  consistent state.
660  *
661  *  return 0 on success, positive on failure
662  **********************************************************************/
663
static void
ixgb_init_locked(struct adapter *adapter)
{
        struct ifnet   *ifp;

        INIT_DEBUGOUT("ixgb_init: begin");

        IXGB_LOCK_ASSERT(adapter);

        /* Quiesce first so (re)initialization starts from a clean state. */
        ixgb_stop(adapter);
        ifp = adapter->ifp;

        /* Get the latest mac address, User can use a LAA */
        bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
            IXGB_ETH_LENGTH_OF_ADDRESS);

        /* Initialize the hardware */
        if (ixgb_hardware_init(adapter)) {
                if_printf(ifp, "Unable to initialize the hardware\n");
                return;
        }
        ixgb_enable_vlans(adapter);

        /* Prepare transmit descriptors and buffers */
        if (ixgb_setup_transmit_structures(adapter)) {
                if_printf(ifp, "Could not setup transmit structures\n");
                ixgb_stop(adapter);
                return;
        }
        ixgb_initialize_transmit_unit(adapter);

        /* Setup Multicast table */
        ixgb_set_multi(adapter);

        /* Prepare receive descriptors and buffers */
        if (ixgb_setup_receive_structures(adapter)) {
                if_printf(ifp, "Could not setup receive structures\n");
                ixgb_stop(adapter);
                return;
        }
        ixgb_initialize_receive_unit(adapter);

        /* Don't lose promiscuous settings */
        ixgb_set_promisc(adapter);

        /* Mark the interface up and ready for traffic. */
        ifp = adapter->ifp;
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;


        /* Advertise checksum offload to the stack only when enabled. */
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
        else
                ifp->if_hwassist = 0;


        /* Enable jumbo frames */
        if (ifp->if_mtu > ETHERMTU) {
                uint32_t        temp_reg;
                IXGB_WRITE_REG(&adapter->hw, MFS,
                               adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
                temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
                temp_reg |= IXGB_CTRL0_JFE;
                IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
        }
        /* Restart the one-second local timer and clear the stats counters. */
        callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
        ixgb_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
        /*
         * Only disable interrupts if we are polling, make sure they are on
         * otherwise.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                ixgb_disable_intr(adapter);
        else
#endif
                ixgb_enable_intr(adapter);

        return;
}
744
/* Stack-facing init entry point: acquire the lock, then do the real work. */
static void
ixgb_init(void *arg)
{
        struct adapter *adapter;

        adapter = arg;
        IXGB_LOCK(adapter);
        ixgb_init_locked(adapter);
        IXGB_UNLOCK(adapter);
}
755
756 #ifdef DEVICE_POLLING
757 static int
758 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
759 {
760         struct adapter *adapter = ifp->if_softc;
761         u_int32_t       reg_icr;
762         int             rx_npkts;
763
764         IXGB_LOCK_ASSERT(adapter);
765
766         if (cmd == POLL_AND_CHECK_STATUS) {
767                 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
768                 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
769                         ixgb_check_for_link(&adapter->hw);
770                         ixgb_print_link_status(adapter);
771                 }
772         }
773         rx_npkts = ixgb_process_receive_interrupts(adapter, count);
774         ixgb_clean_transmit_interrupts(adapter);
775
776         if (ifp->if_snd.ifq_head != NULL)
777                 ixgb_start_locked(ifp);
778         return (rx_npkts);
779 }
780
781 static int
782 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
783 {
784         struct adapter *adapter = ifp->if_softc;
785         int rx_npkts = 0;
786
787         IXGB_LOCK(adapter);
788         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
789                 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
790         IXGB_UNLOCK(adapter);
791         return (rx_npkts);
792 }
793 #endif /* DEVICE_POLLING */
794
795 /*********************************************************************
796  *
797  *  Interrupt Service routine
798  *
799  **********************************************************************/
800
static void
ixgb_intr(void *arg)
{
        u_int32_t       loop_cnt = IXGB_MAX_INTR;
        u_int32_t       reg_icr;
        struct ifnet   *ifp;
        struct adapter *adapter = arg;
        boolean_t       rxdmt0 = FALSE;

        IXGB_LOCK(adapter);

        ifp = adapter->ifp;

#ifdef DEVICE_POLLING
        /* When polling is active, the interrupt path does nothing. */
        if (ifp->if_capenable & IFCAP_POLLING) {
                IXGB_UNLOCK(adapter);
                return;
        }
#endif

        /* Read ICR; a zero value means this interrupt was not ours. */
        reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
        if (reg_icr == 0) {
                IXGB_UNLOCK(adapter);
                return;
        }

        /* Remember the RX-descriptor-minimum event for the re-arm below. */
        if (reg_icr & IXGB_INT_RXDMT0)
                rxdmt0 = TRUE;

#ifdef _SV_
        if (reg_icr & IXGB_INT_RXDMT0)
                adapter->sv_stats.icr_rxdmt0++;
        if (reg_icr & IXGB_INT_RXO)
                adapter->sv_stats.icr_rxo++;
        if (reg_icr & IXGB_INT_RXT0)
                adapter->sv_stats.icr_rxt0++;
        if (reg_icr & IXGB_INT_TXDW)
                adapter->sv_stats.icr_TXDW++;
#endif                          /* _SV_ */

        /* Link status change */
        if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
                ixgb_check_for_link(&adapter->hw);
                ixgb_print_link_status(adapter);
        }
        /*
         * Service RX and TX a fixed IXGB_MAX_INTR times; the loop does
         * not exit early when no work remains.
         */
        while (loop_cnt > 0) {
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        ixgb_process_receive_interrupts(adapter, -1);
                        ixgb_clean_transmit_interrupts(adapter);
                }
                loop_cnt--;
        }

        /* Re-arm the RXDMT0 interrupt (mask then unmask) when raidc is set. */
        if (rxdmt0 && adapter->raidc) {
                IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
                IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
        }
        /* Kick the transmitter if packets queued up while we were busy. */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
                ixgb_start_locked(ifp);

        IXGB_UNLOCK(adapter);
        return;
}
864
865
866 /*********************************************************************
867  *
868  *  Media Ioctl callback
869  *
870  *  This routine is called whenever the user queries the status of
871  *  the interface using ifconfig.
872  *
873  **********************************************************************/
874 static void
875 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
876 {
877         struct adapter *adapter = ifp->if_softc;
878
879         INIT_DEBUGOUT("ixgb_media_status: begin");
880
881         ixgb_check_for_link(&adapter->hw);
882         ixgb_print_link_status(adapter);
883
884         ifmr->ifm_status = IFM_AVALID;
885         ifmr->ifm_active = IFM_ETHER;
886
887         if (!adapter->hw.link_up)
888                 return;
889
890         ifmr->ifm_status |= IFM_ACTIVE;
891         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
892
893         return;
894 }
895
896 /*********************************************************************
897  *
898  *  Media Ioctl callback
899  *
900  *  This routine is called when the user changes speed/duplex using
901  *  media/mediopt option with ifconfig.
902  *
903  **********************************************************************/
904 static int
905 ixgb_media_change(struct ifnet * ifp)
906 {
907         struct adapter *adapter = ifp->if_softc;
908         struct ifmedia *ifm = &adapter->media;
909
910         INIT_DEBUGOUT("ixgb_media_change: begin");
911
912         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
913                 return (EINVAL);
914
915         return (0);
916 }
917
918 /*********************************************************************
919  *
920  *  This routine maps the mbufs to tx descriptors.
921  *
922  *  return 0 on success, positive on failure
923  **********************************************************************/
924
/*
 * Map m_head for DMA and fill one transmit descriptor per segment.
 * Returns 0 on success; ENOBUFS/ENOMEM or the bus_dma error otherwise
 * (the mbuf is left to the caller on failure).
 */
static int
ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
{
        u_int8_t        txd_popts;
        int             i, j, error, nsegs;

#if __FreeBSD_version < 500000
        struct ifvlan  *ifv = NULL;
#endif
        bus_dma_segment_t segs[IXGB_MAX_SCATTER];
        bus_dmamap_t    map;
        struct ixgb_buffer *tx_buffer = NULL;
        struct ixgb_tx_desc *current_tx_desc = NULL;
        struct ifnet   *ifp = adapter->ifp;

        /*
         * Force a cleanup if number of TX descriptors available hits the
         * threshold
         */
        if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
                ixgb_clean_transmit_interrupts(adapter);
        }
        /* Still below the threshold after cleanup: reject the packet. */
        if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
                adapter->no_tx_desc_avail1++;
                return (ENOBUFS);
        }
        /*
         * Map the packet for DMA.
         */
        if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
                adapter->no_tx_map_avail++;
                return (ENOMEM);
        }
        error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
                                        &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                adapter->no_tx_dma_setup++;
                if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
                       "error %u\n", error);
                bus_dmamap_destroy(adapter->txtag, map);
                return (error);
        }
        KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));

        /* Not enough free descriptors to hold every segment. */
        if (nsegs > adapter->num_tx_desc_avail) {
                adapter->no_tx_desc_avail2++;
                bus_dmamap_destroy(adapter->txtag, map);
                return (ENOBUFS);
        }
        /* Set up a checksum-offload context if the stack requested one. */
        if (ifp->if_hwassist > 0) {
                ixgb_transmit_checksum_setup(adapter, m_head,
                                             &txd_popts);
        } else
                txd_popts = 0;

        /* Find out if we are in vlan mode */
#if __FreeBSD_version < 500000
        if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
            m_head->m_pkthdr.rcvif != NULL &&
            m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
                ifv = m_head->m_pkthdr.rcvif->if_softc;
/*
 * NOTE(review): "#elseif" is not a valid preprocessor directive (it
 * should be "#elif").  Inside a skipped conditional group it is silently
 * ignored, so the mtag branch below is never compiled on any
 * __FreeBSD_version -- confirm before re-enabling pre-700000 builds.
 */
#elseif __FreeBSD_version < 700000
        mtag = VLAN_OUTPUT_TAG(ifp, m_head);
#endif
        /* Fill one descriptor per DMA segment, wrapping at the ring end. */
        i = adapter->next_avail_tx_desc;
        for (j = 0; j < nsegs; j++) {
                tx_buffer = &adapter->tx_buffer_area[i];
                current_tx_desc = &adapter->tx_desc_base[i];

                current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
                /* NOTE(review): no htole32() here, unlike buff_addr above --
                 * presumably little-endian hosts only; verify. */
                current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
                current_tx_desc->popts = txd_popts;
                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer->m_head = NULL;
        }

        adapter->num_tx_desc_avail -= nsegs;
        adapter->next_avail_tx_desc = i;

#if __FreeBSD_version < 500000
        if (ifv != NULL) {
                /* Set the vlan id */
                current_tx_desc->vlan = ifv->ifv_tag;
#elseif __FreeBSD_version < 700000
        if (mtag != NULL) {
                /* Set the vlan id */
                current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
#else
        if (m_head->m_flags & M_VLANTAG) {
                current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
#endif

                /* Tell hardware to add tag */
                current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
        }
        /* Only the LAST descriptor's buffer tracks the mbuf and the map. */
        tx_buffer->m_head = m_head;
        tx_buffer->map = map;
        bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);

        /*
         * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
         * that this frame is available to transmit.
         */
        IXGB_WRITE_REG(&adapter->hw, TDT, i);

        return (0);
}
1039
1040 static void
1041 ixgb_set_promisc(struct adapter * adapter)
1042 {
1043
1044         u_int32_t       reg_rctl;
1045         struct ifnet   *ifp = adapter->ifp;
1046
1047         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1048
1049         if (ifp->if_flags & IFF_PROMISC) {
1050                 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1051                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1052         } else if (ifp->if_flags & IFF_ALLMULTI) {
1053                 reg_rctl |= IXGB_RCTL_MPE;
1054                 reg_rctl &= ~IXGB_RCTL_UPE;
1055                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1056         }
1057         return;
1058 }
1059
1060 static void
1061 ixgb_disable_promisc(struct adapter * adapter)
1062 {
1063         u_int32_t       reg_rctl;
1064
1065         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1066
1067         reg_rctl &= (~IXGB_RCTL_UPE);
1068         reg_rctl &= (~IXGB_RCTL_MPE);
1069         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1070
1071         return;
1072 }
1073
1074
1075 /*********************************************************************
1076  *  Multicast Update
1077  *
1078  *  This routine is called whenever multicast address list is updated.
1079  *
1080  **********************************************************************/
1081
1082 static void
1083 ixgb_set_multi(struct adapter * adapter)
1084 {
1085         u_int32_t       reg_rctl = 0;
1086         u_int8_t        *mta;
1087         struct ifmultiaddr *ifma;
1088         int             mcnt = 0;
1089         struct ifnet   *ifp = adapter->ifp;
1090
1091         IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1092
1093         mta = adapter->mta;
1094         bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1095             MAX_NUM_MULTICAST_ADDRESSES);
1096
1097         if_maddr_rlock(ifp);
1098 #if __FreeBSD_version < 500000
1099         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1100 #else
1101         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1102 #endif
1103                 if (ifma->ifma_addr->sa_family != AF_LINK)
1104                         continue;
1105
1106                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1107                       &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1108                 mcnt++;
1109         }
1110         if_maddr_runlock(ifp);
1111
1112         if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1113                 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1114                 reg_rctl |= IXGB_RCTL_MPE;
1115                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1116         } else
1117                 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1118
1119         return;
1120 }
1121
1122
1123 /*********************************************************************
1124  *  Timer routine
1125  *
1126  *  This routine checks for link status and updates statistics.
1127  *
1128  **********************************************************************/
1129
/*
 * Once-per-second housekeeping: refresh link state, update statistics,
 * run the software TX watchdog and re-arm the callout.  Runs with the
 * adapter lock held (callout_init_mtx style).
 */
static void
ixgb_local_timer(void *arg)
{
        struct ifnet   *ifp;
        struct adapter *adapter = arg;
        ifp = adapter->ifp;

        IXGB_LOCK_ASSERT(adapter);

        ixgb_check_for_link(&adapter->hw);
        ixgb_print_link_status(adapter);
        ixgb_update_stats_counters(adapter);
        if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
                ixgb_print_hw_stats(adapter);
        }
        /* tx_timer is armed by the transmit path; expiry means a hang. */
        if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
                ixgb_watchdog(adapter);
        /* Reschedule ourselves for one second from now. */
        callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
}
1149
1150 static void
1151 ixgb_print_link_status(struct adapter * adapter)
1152 {
1153         if (adapter->hw.link_up) {
1154                 if (!adapter->link_active) {
1155                         if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1156                                10000,
1157                                "Full Duplex");
1158                         adapter->link_active = 1;
1159                 }
1160         } else {
1161                 if (adapter->link_active) {
1162                         if_printf(adapter->ifp, "Link is Down \n");
1163                         adapter->link_active = 0;
1164                 }
1165         }
1166
1167         return;
1168 }
1169
1170
1171
1172 /*********************************************************************
1173  *
1174  *  This routine disables all traffic on the adapter by issuing a
1175  *  global reset on the MAC and deallocates TX/RX buffers.
1176  *
1177  **********************************************************************/
1178
/*
 * Bring the adapter down: disable interrupts, issue a global reset,
 * stop the timer and free TX/RX structures.  Called with the adapter
 * lock held.
 */
static void
ixgb_stop(void *arg)
{
        struct ifnet   *ifp;
        struct adapter *adapter = arg;
        ifp = adapter->ifp;

        IXGB_LOCK_ASSERT(adapter);

        INIT_DEBUGOUT("ixgb_stop: begin\n");
        ixgb_disable_intr(adapter);
        /* Clear the "already stopped" flag so ixgb_adapter_stop() always
         * performs the full reset sequence. */
        adapter->hw.adapter_stopped = FALSE;
        ixgb_adapter_stop(&adapter->hw);
        callout_stop(&adapter->timer);
        ixgb_free_transmit_structures(adapter);
        ixgb_free_receive_structures(adapter);

        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
        adapter->tx_timer = 0;

        return;
}
1202
1203
1204 /*********************************************************************
1205  *
1206  *  Determine hardware revision.
1207  *
1208  **********************************************************************/
/*
 * Record vendor/device/revision information from PCI config space and
 * derive the MAC type for this board.
 */
static void
ixgb_identify_hardware(struct adapter * adapter)
{
        device_t        dev = adapter->dev;

        /* Make sure our PCI config space has the necessary stuff set */
        pci_enable_busmaster(dev);
        adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);

        /* Save off the information about this board */
        adapter->hw.vendor_id = pci_get_vendor(dev);
        adapter->hw.device_id = pci_get_device(dev);
        adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
        adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
        adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

        /* Set MacType, etc. based on this PCI info */
        switch (adapter->hw.device_id) {
        case IXGB_DEVICE_ID_82597EX:
        case IXGB_DEVICE_ID_82597EX_SR:
                adapter->hw.mac_type = ixgb_82597;
                break;
        default:
                /* Unknown id: warn but continue with mac_type unset. */
                INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
                device_printf(dev, "unsupported device id 0x%x\n",
                    adapter->hw.device_id);
        }

        return;
}
1239
/*
 * Allocate the memory BAR and a (shareable) IRQ, map the register
 * window and install ixgb_intr as the interrupt handler.
 * Returns 0 on success, ENXIO on failure.  Partially-acquired resources
 * are left for ixgb_free_pci_resources() to release.
 */
static int
ixgb_allocate_pci_resources(struct adapter * adapter)
{
        int             rid;
        device_t        dev = adapter->dev;

        rid = IXGB_MMBA;
        adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                                 &rid,
                                                 RF_ACTIVE);
        if (!(adapter->res_memory)) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                return (ENXIO);
        }
        /* Cache tag/handle so the register access macros can use them. */
        adapter->osdep.mem_bus_space_tag =
                rman_get_bustag(adapter->res_memory);
        adapter->osdep.mem_bus_space_handle =
                rman_get_bushandle(adapter->res_memory);
        adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;

        rid = 0x0;
        adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                                        &rid,
                                                        RF_SHAREABLE | RF_ACTIVE);
        if (!(adapter->res_interrupt)) {
                device_printf(dev,
                    "Unable to allocate bus resource: interrupt\n");
                return (ENXIO);
        }
        /* NOTE(review): the cast on ixgb_intr is redundant -- it already
         * has the void (*)(void *) signature bus_setup_intr expects. */
        if (bus_setup_intr(dev, adapter->res_interrupt,
                           INTR_TYPE_NET | INTR_MPSAFE,
                           NULL, (void (*) (void *))ixgb_intr, adapter,
                           &adapter->int_handler_tag)) {
                device_printf(dev, "Error registering interrupt handler!\n");
                return (ENXIO);
        }
        adapter->hw.back = &adapter->osdep;

        return (0);
}
1280
1281 static void
1282 ixgb_free_pci_resources(struct adapter * adapter)
1283 {
1284         device_t        dev = adapter->dev;
1285
1286         if (adapter->res_interrupt != NULL) {
1287                 bus_teardown_intr(dev, adapter->res_interrupt,
1288                                   adapter->int_handler_tag);
1289                 bus_release_resource(dev, SYS_RES_IRQ, 0,
1290                                      adapter->res_interrupt);
1291         }
1292         if (adapter->res_memory != NULL) {
1293                 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1294                                      adapter->res_memory);
1295         }
1296         if (adapter->res_ioport != NULL) {
1297                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1298                                      adapter->res_ioport);
1299         }
1300         return;
1301 }
1302
1303 /*********************************************************************
1304  *
1305  *  Initialize the hardware to a configuration as specified by the
1306  *  adapter structure. The controller is reset, the EEPROM is
1307  *  verified, the MAC address is set, then the shared initialization
1308  *  routines are called.
1309  *
1310  **********************************************************************/
1311 static int
1312 ixgb_hardware_init(struct adapter * adapter)
1313 {
1314         /* Issue a global reset */
1315         adapter->hw.adapter_stopped = FALSE;
1316         ixgb_adapter_stop(&adapter->hw);
1317
1318         /* Make sure we have a good EEPROM before we read from it */
1319         if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1320                 device_printf(adapter->dev,
1321                     "The EEPROM Checksum Is Not Valid\n");
1322                 return (EIO);
1323         }
1324         if (!ixgb_init_hw(&adapter->hw)) {
1325                 device_printf(adapter->dev, "Hardware Initialization Failed");
1326                 return (EIO);
1327         }
1328
1329         return (0);
1330 }
1331
1332 /*********************************************************************
1333  *
1334  *  Setup networking device structure and register an interface.
1335  *
1336  **********************************************************************/
/*
 * Allocate an ifnet, fill in its callbacks and capabilities, attach it
 * to the network stack and register the supported media types.
 * Returns 0 on success, -1 if the ifnet cannot be allocated.
 */
static int
ixgb_setup_interface(device_t dev, struct adapter * adapter)
{
        struct ifnet   *ifp;
        INIT_DEBUGOUT("ixgb_setup_interface: begin");

        ifp = adapter->ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not allocate ifnet structure\n");
                return (-1);
        }
#if __FreeBSD_version >= 502000
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#else
        ifp->if_unit = device_get_unit(dev);
        ifp->if_name = "ixgb";
#endif
        /* NOTE(review): if_baudrate claims 1 Gb/s although the 82597 is a
         * 10 Gb/s part -- presumably IF_Gbps overflowed here; confirm. */
        ifp->if_baudrate = 1000000000;
        ifp->if_init = ixgb_init;
        ifp->if_softc = adapter;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = ixgb_ioctl;
        ifp->if_start = ixgb_start;
        ifp->if_get_counter = ixgb_get_counter;
        ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

#if __FreeBSD_version < 500000
        ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
        ether_ifattach(ifp, adapter->hw.curr_mac_addr);
#endif

        ifp->if_capabilities = IFCAP_HWCSUM;

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

#if __FreeBSD_version >= 500000
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
#endif

        ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
        /* Polling is advertised but left disabled by default (it is set
         * after capenable was copied above). */
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
         */
        ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
                     ixgb_media_status);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
                    0, NULL);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
                    0, NULL);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

        return (0);
}
1401
1402 /********************************************************************
1403  * Manage DMA'able memory.
1404  *******************************************************************/
1405 static void
1406 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1407 {
1408         if (error)
1409                 return;
1410         *(bus_addr_t *) arg = segs->ds_addr;
1411         return;
1412 }
1413
/*
 * Allocate and map a contiguous DMA region of 'size' bytes, filling in
 * dma->dma_tag/map/vaddr/paddr/size.  Returns 0 on success or a bus_dma
 * error code; on failure everything allocated so far is unwound via the
 * goto cleanup chain and dma->dma_tag is set to NULL.
 */
static int
ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
                struct ixgb_dma_alloc * dma, int mapflags)
{
        device_t dev;
        int             r;

        dev = adapter->dev;
        r = bus_dma_tag_create(bus_get_dma_tag(dev),    /* parent */
                               PAGE_SIZE, 0,    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,      /* filter, filterarg */
                               size,    /* maxsize */
                               1,       /* nsegments */
                               size,    /* maxsegsize */
                               BUS_DMA_ALLOCNOW,        /* flags */
#if __FreeBSD_version >= 502000
                               NULL,    /* lockfunc */
                               NULL,    /* lockfuncarg */
#endif
                               &dma->dma_tag);
        if (r != 0) {
                device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
                       "error %u\n", r);
                goto fail_0;
        }
        r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
                             BUS_DMA_NOWAIT, &dma->dma_map);
        if (r != 0) {
                device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
                       "error %u\n", r);
                goto fail_1;
        }
        /* ixgb_dmamap_cb stores the (single) segment address in dma_paddr. */
        r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
                            size,
                            ixgb_dmamap_cb,
                            &dma->dma_paddr,
                            mapflags | BUS_DMA_NOWAIT);
        if (r != 0) {
                device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
                       "error %u\n", r);
                goto fail_2;
        }
        dma->dma_size = size;
        return (0);
fail_2:
        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
        bus_dma_tag_destroy(dma->dma_tag);
fail_0:
        dma->dma_tag = NULL;
        return (r);
}
1468
1469
1470
/*
 * Release a DMA region in the reverse order of ixgb_dma_malloc():
 * unload the map, free the memory, destroy the tag.
 */
static void
ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
{
        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
        bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
        bus_dma_tag_destroy(dma->dma_tag);
}
1478
1479 /*********************************************************************
1480  *
1481  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1482  *  the information needed to transmit a packet on the wire.
1483  *
1484  **********************************************************************/
1485 static int
1486 ixgb_allocate_transmit_structures(struct adapter * adapter)
1487 {
1488         if (!(adapter->tx_buffer_area =
1489               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1490                                             adapter->num_tx_desc, M_DEVBUF,
1491                                             M_NOWAIT | M_ZERO))) {
1492                 device_printf(adapter->dev,
1493                     "Unable to allocate tx_buffer memory\n");
1494                 return ENOMEM;
1495         }
1496         bzero(adapter->tx_buffer_area,
1497               sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1498
1499         return 0;
1500 }
1501
1502 /*********************************************************************
1503  *
1504  *  Allocate and initialize transmit structures.
1505  *
1506  **********************************************************************/
/*
 * Create the TX DMA tag, allocate the tx_buffer array and reset the
 * transmit ring bookkeeping.  Returns 0 on success, ENOMEM on failure.
 */
static int
ixgb_setup_transmit_structures(struct adapter * adapter)
{
        /*
         * Setup DMA descriptor areas.
         */
        if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
                               PAGE_SIZE, 0,    /* alignment, bounds */
                               BUS_SPACE_MAXADDR,       /* lowaddr */
                               BUS_SPACE_MAXADDR,       /* highaddr */
                               NULL, NULL,      /* filter, filterarg */
                               MCLBYTES * IXGB_MAX_SCATTER,     /* maxsize */
                               IXGB_MAX_SCATTER,        /* nsegments */
                               MCLBYTES,        /* maxsegsize */
                               BUS_DMA_ALLOCNOW,        /* flags */
#if __FreeBSD_version >= 502000
                               NULL,    /* lockfunc */
                               NULL,    /* lockfuncarg */
#endif
                               &adapter->txtag)) {
                device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
                return (ENOMEM);
        }
        if (ixgb_allocate_transmit_structures(adapter))
                return ENOMEM;

        /* Clear the descriptor ring itself (allocated elsewhere). */
        bzero((void *)adapter->tx_desc_base,
              (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);

        adapter->next_avail_tx_desc = 0;
        adapter->oldest_used_tx_desc = 0;

        /* Set number of descriptors available */
        adapter->num_tx_desc_avail = adapter->num_tx_desc;

        /* Set checksum context */
        adapter->active_checksum_context = OFFLOAD_NONE;

        return 0;
}
1547
1548 /*********************************************************************
1549  *
1550  *  Enable transmit unit.
1551  *
1552  **********************************************************************/
1553 static void
1554 ixgb_initialize_transmit_unit(struct adapter * adapter)
1555 {
1556         u_int32_t       reg_tctl;
1557         u_int64_t       tdba = adapter->txdma.dma_paddr;
1558
1559         /* Setup the Base and Length of the Tx Descriptor Ring */
1560         IXGB_WRITE_REG(&adapter->hw, TDBAL,
1561                        (tdba & 0x00000000ffffffffULL));
1562         IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1563         IXGB_WRITE_REG(&adapter->hw, TDLEN,
1564                        adapter->num_tx_desc *
1565                        sizeof(struct ixgb_tx_desc));
1566
1567         /* Setup the HW Tx Head and Tail descriptor pointers */
1568         IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1569         IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1570
1571
1572         HW_DEBUGOUT2("Base = %x, Length = %x\n",
1573                      IXGB_READ_REG(&adapter->hw, TDBAL),
1574                      IXGB_READ_REG(&adapter->hw, TDLEN));
1575
1576         IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1577
1578
1579         /* Program the Transmit Control Register */
1580         reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1581         reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1582         IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1583
1584         /* Setup Transmit Descriptor Settings for this adapter */
1585         adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1586
1587         if (adapter->tx_int_delay > 0)
1588                 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1589         return;
1590 }
1591
1592 /*********************************************************************
1593  *
1594  *  Free all transmit related data structures.
1595  *
1596  **********************************************************************/
1597 static void
1598 ixgb_free_transmit_structures(struct adapter * adapter)
1599 {
1600         struct ixgb_buffer *tx_buffer;
1601         int             i;
1602
1603         INIT_DEBUGOUT("free_transmit_structures: begin");
1604
1605         if (adapter->tx_buffer_area != NULL) {
1606                 tx_buffer = adapter->tx_buffer_area;
1607                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1608                         if (tx_buffer->m_head != NULL) {
1609                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1610                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1611                                 m_freem(tx_buffer->m_head);
1612                         }
1613                         tx_buffer->m_head = NULL;
1614                 }
1615         }
1616         if (adapter->tx_buffer_area != NULL) {
1617                 free(adapter->tx_buffer_area, M_DEVBUF);
1618                 adapter->tx_buffer_area = NULL;
1619         }
1620         if (adapter->txtag != NULL) {
1621                 bus_dma_tag_destroy(adapter->txtag);
1622                 adapter->txtag = NULL;
1623         }
1624         return;
1625 }
1626
1627 /*********************************************************************
1628  *
1629  *  The offload context needs to be set when we transfer the first
1630  *  packet of a particular protocol (TCP/UDP). We change the
1631  *  context only if the protocol type changes.
1632  *
1633  **********************************************************************/
1634 static void
1635 ixgb_transmit_checksum_setup(struct adapter * adapter,
1636                              struct mbuf * mp,
1637                              u_int8_t * txd_popts)
1638 {
1639         struct ixgb_context_desc *TXD;
1640         struct ixgb_buffer *tx_buffer;
1641         int             curr_txd;
1642
1643         if (mp->m_pkthdr.csum_flags) {
1644
1645                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1646                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1647                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1648                                 return;
1649                         else
1650                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1651                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1652                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1653                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1654                                 return;
1655                         else
1656                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1657                 } else {
1658                         *txd_popts = 0;
1659                         return;
1660                 }
1661         } else {
1662                 *txd_popts = 0;
1663                 return;
1664         }
1665
1666         /*
1667          * If we reach this point, the checksum offload context needs to be
1668          * reset.
1669          */
1670         curr_txd = adapter->next_avail_tx_desc;
1671         tx_buffer = &adapter->tx_buffer_area[curr_txd];
1672         TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
1673
1674
1675         TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1676         TXD->tucse = 0;
1677
1678         TXD->mss = 0;
1679
1680         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1681                 TXD->tucso =
1682                         ENET_HEADER_SIZE + sizeof(struct ip) +
1683                         offsetof(struct tcphdr, th_sum);
1684         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1685                 TXD->tucso =
1686                         ENET_HEADER_SIZE + sizeof(struct ip) +
1687                         offsetof(struct udphdr, uh_sum);
1688         }
1689         TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
1690
1691         tx_buffer->m_head = NULL;
1692
1693         if (++curr_txd == adapter->num_tx_desc)
1694                 curr_txd = 0;
1695
1696         adapter->num_tx_desc_avail--;
1697         adapter->next_avail_tx_desc = curr_txd;
1698         return;
1699 }
1700
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 *  Caller must hold the adapter lock (asserted below).
 *
 **********************************************************************/
static void
ixgb_clean_transmit_interrupts(struct adapter * adapter)
{
        int             i, num_avail;
        struct ixgb_buffer *tx_buffer;
        struct ixgb_tx_desc *tx_desc;

        IXGB_LOCK_ASSERT(adapter);

        /* Every descriptor is already free: nothing to reclaim. */
        if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
                return;

#ifdef _SV_
        adapter->clean_tx_interrupts++;
#endif
        num_avail = adapter->num_tx_desc_avail;
        i = adapter->oldest_used_tx_desc;

        tx_buffer = &adapter->tx_buffer_area[i];
        tx_desc = &adapter->tx_desc_base[i];

        /*
         * Walk forward from the oldest in-use descriptor, reclaiming
         * every entry the hardware has marked done (DD status bit).
         */
        while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

                tx_desc->status = 0;
                num_avail++;

                if (tx_buffer->m_head) {
                        /* Tear down the packet's DMA mapping, then free it. */
                        bus_dmamap_sync(adapter->txtag, tx_buffer->map,
                                        BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(adapter->txtag, tx_buffer->map);
                        bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
                        m_freem(tx_buffer->m_head);
                        tx_buffer->m_head = NULL;
                }
                /* Advance with ring wrap-around. */
                if (++i == adapter->num_tx_desc)
                        i = 0;

                tx_buffer = &adapter->tx_buffer_area[i];
                tx_desc = &adapter->tx_desc_base[i];
        }

        adapter->oldest_used_tx_desc = i;

        /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
         * it is OK to send packets. If there are no pending descriptors,
         * clear the timeout. Otherwise, if some descriptors have been freed,
         * restart the timeout.
         */
        if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
                struct ifnet   *ifp = adapter->ifp;

                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (num_avail == adapter->num_tx_desc)
                        adapter->tx_timer = 0;
                else if (num_avail == adapter->num_tx_desc_avail)
                        adapter->tx_timer = IXGB_TX_TIMEOUT;
        }
        adapter->num_tx_desc_avail = num_avail;
        return;
}
1769
1770
1771 /*********************************************************************
1772  *
1773  *  Get a buffer from system mbuf buffer pool.
1774  *
1775  **********************************************************************/
1776 static int
1777 ixgb_get_buf(int i, struct adapter * adapter,
1778              struct mbuf * nmp)
1779 {
1780         register struct mbuf *mp = nmp;
1781         struct ixgb_buffer *rx_buffer;
1782         struct ifnet   *ifp;
1783         bus_addr_t      paddr;
1784         int             error;
1785
1786         ifp = adapter->ifp;
1787
1788         if (mp == NULL) {
1789
1790                 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1791
1792                 if (mp == NULL) {
1793                         adapter->mbuf_alloc_failed++;
1794                         return (ENOBUFS);
1795                 }
1796                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1797         } else {
1798                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1799                 mp->m_data = mp->m_ext.ext_buf;
1800                 mp->m_next = NULL;
1801         }
1802
1803         if (ifp->if_mtu <= ETHERMTU) {
1804                 m_adj(mp, ETHER_ALIGN);
1805         }
1806         rx_buffer = &adapter->rx_buffer_area[i];
1807
1808         /*
1809          * Using memory from the mbuf cluster pool, invoke the bus_dma
1810          * machinery to arrange the memory mapping.
1811          */
1812         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1813                                 mtod(mp, void *), mp->m_len,
1814                                 ixgb_dmamap_cb, &paddr, 0);
1815         if (error) {
1816                 m_free(mp);
1817                 return (error);
1818         }
1819         rx_buffer->m_head = mp;
1820         adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1821         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1822
1823         return (0);
1824 }
1825
1826 /*********************************************************************
1827  *
1828  *  Allocate memory for rx_buffer structures. Since we use one
1829  *  rx_buffer per received packet, the maximum number of rx_buffer's
1830  *  that we'll need is equal to the number of receive descriptors
1831  *  that we've allocated.
1832  *
1833  **********************************************************************/
1834 static int
1835 ixgb_allocate_receive_structures(struct adapter * adapter)
1836 {
1837         int             i, error;
1838         struct ixgb_buffer *rx_buffer;
1839
1840         if (!(adapter->rx_buffer_area =
1841               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1842                                             adapter->num_rx_desc, M_DEVBUF,
1843                                             M_NOWAIT | M_ZERO))) {
1844                 device_printf(adapter->dev,
1845                     "Unable to allocate rx_buffer memory\n");
1846                 return (ENOMEM);
1847         }
1848         bzero(adapter->rx_buffer_area,
1849               sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1850
1851         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1852                                    PAGE_SIZE, 0,        /* alignment, bounds */
1853                                    BUS_SPACE_MAXADDR,   /* lowaddr */
1854                                    BUS_SPACE_MAXADDR,   /* highaddr */
1855                                    NULL, NULL,  /* filter, filterarg */
1856                                    MCLBYTES,    /* maxsize */
1857                                    1,   /* nsegments */
1858                                    MCLBYTES,    /* maxsegsize */
1859                                    BUS_DMA_ALLOCNOW,    /* flags */
1860 #if __FreeBSD_version >= 502000
1861                                    NULL,        /* lockfunc */
1862                                    NULL,        /* lockfuncarg */
1863 #endif
1864                                    &adapter->rxtag);
1865         if (error != 0) {
1866                 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1867                        "bus_dma_tag_create failed; error %u\n",
1868                        error);
1869                 goto fail_0;
1870         }
1871         rx_buffer = adapter->rx_buffer_area;
1872         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1873                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1874                                           &rx_buffer->map);
1875                 if (error != 0) {
1876                         device_printf(adapter->dev,
1877                                "ixgb_allocate_receive_structures: "
1878                                "bus_dmamap_create failed; error %u\n",
1879                                error);
1880                         goto fail_1;
1881                 }
1882         }
1883
1884         for (i = 0; i < adapter->num_rx_desc; i++) {
1885                 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1886                         adapter->rx_buffer_area[i].m_head = NULL;
1887                         adapter->rx_desc_base[i].buff_addr = 0;
1888                         return (ENOBUFS);
1889                 }
1890         }
1891
1892         return (0);
1893 fail_1:
1894         bus_dma_tag_destroy(adapter->rxtag);
1895 fail_0:
1896         adapter->rxtag = NULL;
1897         free(adapter->rx_buffer_area, M_DEVBUF);
1898         adapter->rx_buffer_area = NULL;
1899         return (error);
1900 }
1901
1902 /*********************************************************************
1903  *
1904  *  Allocate and initialize receive structures.
1905  *
1906  **********************************************************************/
1907 static int
1908 ixgb_setup_receive_structures(struct adapter * adapter)
1909 {
1910         bzero((void *)adapter->rx_desc_base,
1911               (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1912
1913         if (ixgb_allocate_receive_structures(adapter))
1914                 return ENOMEM;
1915
1916         /* Setup our descriptor pointers */
1917         adapter->next_rx_desc_to_check = 0;
1918         adapter->next_rx_desc_to_use = 0;
1919         return (0);
1920 }
1921
/*********************************************************************
 *
 *  Enable receive unit.
 *
 *  Programs the RX descriptor ring registers, interrupt moderation,
 *  checksum offload and the receive control register, then enables
 *  receives.
 *
 **********************************************************************/
static void
ixgb_initialize_receive_unit(struct adapter * adapter)
{
        u_int32_t       reg_rctl;
        u_int32_t       reg_rxcsum;
        u_int32_t       reg_rxdctl;
        struct ifnet   *ifp;
        u_int64_t       rdba = adapter->rxdma.dma_paddr;

        ifp = adapter->ifp;

        /*
         * Make sure receives are disabled while setting up the descriptor
         * ring
         */
        reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
        IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

        /* Set the Receive Delay Timer Register */
        IXGB_WRITE_REG(&adapter->hw, RDTR,
                       adapter->rx_int_delay);


        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGB_WRITE_REG(&adapter->hw, RDBAL,
                       (rdba & 0x00000000ffffffffULL));
        IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
        IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
                       sizeof(struct ixgb_rx_desc));

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        IXGB_WRITE_REG(&adapter->hw, RDH, 0);

        IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);


        /* Descriptor fetch/write-back thresholds (PTHRESH/HTHRESH/WTHRESH). */
        reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
                | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
                | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
        IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);


        /*
         * RAIDC receive-interrupt moderation.  NOTE(review): raidc is
         * unconditionally forced to 1 here, making the if() below always
         * true -- presumably a leftover debug/tuning knob; confirm.
         */
        adapter->raidc = 1;
        if (adapter->raidc) {
                uint32_t        raidc;
                uint8_t         poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

                /* Derive poll threshold from ring size, capped to 6 bits. */
                poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
                poll_threshold >>= 1;
                poll_threshold &= 0x3F;
                raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
                        (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
                        (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
                        poll_threshold;
                IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
        }
        /* Enable Receive Checksum Offload for TCP and UDP ? */
        if (ifp->if_capenable & IFCAP_RXCSUM) {
                reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
                reg_rxcsum |= IXGB_RXCSUM_TUOFL;
                IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
        }
        /* Setup the Receive Control Register */
        reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
        reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
        reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
                IXGB_RCTL_CFF |
                (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

        /* Select the hardware buffer size matching our configured length. */
        switch (adapter->rx_buffer_len) {
        default:
        case IXGB_RXBUFFER_2048:
                reg_rctl |= IXGB_RCTL_BSIZE_2048;
                break;
        case IXGB_RXBUFFER_4096:
                reg_rctl |= IXGB_RCTL_BSIZE_4096;
                break;
        case IXGB_RXBUFFER_8192:
                reg_rctl |= IXGB_RCTL_BSIZE_8192;
                break;
        case IXGB_RXBUFFER_16384:
                reg_rctl |= IXGB_RCTL_BSIZE_16384;
                break;
        }

        reg_rctl |= IXGB_RCTL_RXEN;


        /* Enable Receives */
        IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

        return;
}
2022
2023 /*********************************************************************
2024  *
2025  *  Free receive related data structures.
2026  *
2027  **********************************************************************/
2028 static void
2029 ixgb_free_receive_structures(struct adapter * adapter)
2030 {
2031         struct ixgb_buffer *rx_buffer;
2032         int             i;
2033
2034         INIT_DEBUGOUT("free_receive_structures: begin");
2035
2036         if (adapter->rx_buffer_area != NULL) {
2037                 rx_buffer = adapter->rx_buffer_area;
2038                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2039                         if (rx_buffer->map != NULL) {
2040                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2041                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2042                         }
2043                         if (rx_buffer->m_head != NULL)
2044                                 m_freem(rx_buffer->m_head);
2045                         rx_buffer->m_head = NULL;
2046                 }
2047         }
2048         if (adapter->rx_buffer_area != NULL) {
2049                 free(adapter->rx_buffer_area, M_DEVBUF);
2050                 adapter->rx_buffer_area = NULL;
2051         }
2052         if (adapter->rxtag != NULL) {
2053                 bus_dma_tag_destroy(adapter->rxtag);
2054                 adapter->rxtag = NULL;
2055         }
2056         return;
2057 }
2058
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Returns the number of packets handed to the stack.  Caller must
 *  hold the adapter lock; it is dropped around if_input().
 *
 *********************************************************************/
static int
ixgb_process_receive_interrupts(struct adapter * adapter, int count)
{
        struct ifnet   *ifp;
        struct mbuf    *mp;
#if __FreeBSD_version < 500000
        struct ether_header *eh;
#endif
        int             eop = 0;
        int             len;
        u_int8_t        accept_frame = 0;
        int             i;
        int             next_to_use = 0;
        int             eop_desc;
        int             rx_npkts = 0;
        /* Pointer to the receive descriptor being examined. */
        struct ixgb_rx_desc *current_desc;

        IXGB_LOCK_ASSERT(adapter);

        ifp = adapter->ifp;
        i = adapter->next_rx_desc_to_check;
        next_to_use = adapter->next_rx_desc_to_use;
        eop_desc = adapter->next_rx_desc_to_check;
        current_desc = &adapter->rx_desc_base[i];

        /* Nothing written back yet: bail before touching the ring. */
        if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
#ifdef _SV_
                adapter->no_pkts_avail++;
#endif
                return (rx_npkts);
        }
        /* Consume descriptors the hardware has completed (DD set). */
        while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {

                mp = adapter->rx_buffer_area[i].m_head;
                bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
                                BUS_DMASYNC_POSTREAD);
                accept_frame = 1;
                /* Only end-of-packet descriptors count against the budget. */
                if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
                        count--;
                        eop = 1;
                } else {
                        eop = 0;
                }
                len = current_desc->length;

                /* Reject the whole frame if the hardware flagged errors. */
                if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
                            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
                                            IXGB_RX_DESC_ERRORS_RXE)) {
                        accept_frame = 0;
                }
                if (accept_frame) {

                        /* Assign correct length to the current fragment */
                        mp->m_len = len;

                        if (adapter->fmp == NULL) {
                                mp->m_pkthdr.len = len;
                                adapter->fmp = mp;      /* Store the first mbuf */
                                adapter->lmp = mp;
                        } else {
                                /* Chain mbuf's together */
                                mp->m_flags &= ~M_PKTHDR;
                                adapter->lmp->m_next = mp;
                                adapter->lmp = adapter->lmp->m_next;
                                adapter->fmp->m_pkthdr.len += len;
                        }

                        if (eop) {
                                /* Remember the last EOP slot for the
                                 * write-back workaround below. */
                                eop_desc = i;
                                adapter->fmp->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version < 500000
                                eh = mtod(adapter->fmp, struct ether_header *);

                                /* Remove ethernet header from mbuf */
                                m_adj(adapter->fmp, sizeof(struct ether_header));
                                ixgb_receive_checksum(adapter, current_desc,
                                                      adapter->fmp);

                                if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
                                        VLAN_INPUT_TAG(eh, adapter->fmp,
                                                     current_desc->special);
                                else
                                        ether_input(ifp, eh, adapter->fmp);
#else
                                ixgb_receive_checksum(adapter, current_desc,
                                                      adapter->fmp);
#if __FreeBSD_version < 700000
                                if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
                                        VLAN_INPUT_TAG(ifp, adapter->fmp,
                                                       current_desc->special);
#else
                                if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
                                        adapter->fmp->m_pkthdr.ether_vtag =
                                            current_desc->special;
                                        adapter->fmp->m_flags |= M_VLANTAG;
                                }
#endif

                                if (adapter->fmp != NULL) {
                                        /* Drop the lock while the stack
                                         * processes the packet. */
                                        IXGB_UNLOCK(adapter);
                                        (*ifp->if_input) (ifp, adapter->fmp);
                                        IXGB_LOCK(adapter);
                                        rx_npkts++;
                                }
#endif
                                adapter->fmp = NULL;
                                adapter->lmp = NULL;
                        }
                        adapter->rx_buffer_area[i].m_head = NULL;
                } else {
                        /* Errored frame: discard any partial chain too. */
                        adapter->dropped_pkts++;
                        if (adapter->fmp != NULL)
                                m_freem(adapter->fmp);
                        adapter->fmp = NULL;
                        adapter->lmp = NULL;
                }

                /* Zero out the receive descriptors status  */
                current_desc->status = 0;

                /* Advance our pointers to the next descriptor */
                if (++i == adapter->num_rx_desc) {
                        i = 0;
                        current_desc = adapter->rx_desc_base;
                } else
                        current_desc++;
        }
        adapter->next_rx_desc_to_check = i;

        if (--i < 0)
                i = (adapter->num_rx_desc - 1);

        /*
         * 82597EX: Workaround for redundent write back in receive descriptor ring (causes
         * memory corruption). Avoid using and re-submitting the most recently received RX
         * descriptor back to hardware.
         *
         * if(Last written back descriptor == EOP bit set descriptor)
         *      then avoid re-submitting the most recently received RX descriptor 
         *      back to hardware.
         * if(Last written back descriptor != EOP bit set descriptor)
         *      then avoid re-submitting the most recently received RX descriptors
         *      till last EOP bit set descriptor. 
         */
        if (eop_desc != i) {
                if (++eop_desc == adapter->num_rx_desc)
                        eop_desc = 0;
                i = eop_desc;
        }
        /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
        while (next_to_use != i) {
                current_desc = &adapter->rx_desc_base[next_to_use];
                if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
                            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
                                             IXGB_RX_DESC_ERRORS_RXE))) {
                        /* Errored slot: recycle its existing mbuf. */
                        mp = adapter->rx_buffer_area[next_to_use].m_head;
                        ixgb_get_buf(next_to_use, adapter, mp);
                } else {
                        /* Clean slot: allocate a fresh mbuf; stop on ENOBUFS. */
                        if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
                                break;
                }
                /* Advance our pointers to the next descriptor */
                if (++next_to_use == adapter->num_rx_desc) {
                        next_to_use = 0;
                        current_desc = adapter->rx_desc_base;
                } else
                        current_desc++;
        }
        adapter->next_rx_desc_to_use = next_to_use;
        if (--next_to_use < 0)
                next_to_use = (adapter->num_rx_desc - 1);
        /* Advance the IXGB's Receive Queue #0  "Tail Pointer" */
        IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);

        return (rx_npkts);
}
2247
2248 /*********************************************************************
2249  *
2250  *  Verify that the hardware indicated that the checksum is valid.
2251  *  Inform the stack about the status of checksum so that stack
2252  *  doesn't spend time verifying the checksum.
2253  *
2254  *********************************************************************/
2255 static void
2256 ixgb_receive_checksum(struct adapter * adapter,
2257                       struct ixgb_rx_desc * rx_desc,
2258                       struct mbuf * mp)
2259 {
2260         if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2261                 mp->m_pkthdr.csum_flags = 0;
2262                 return;
2263         }
2264         if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2265                 /* Did it pass? */
2266                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2267                         /* IP Checksum Good */
2268                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2269                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2270
2271                 } else {
2272                         mp->m_pkthdr.csum_flags = 0;
2273                 }
2274         }
2275         if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2276                 /* Did it pass? */
2277                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2278                         mp->m_pkthdr.csum_flags |=
2279                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2280                         mp->m_pkthdr.csum_data = htons(0xffff);
2281                 }
2282         }
2283         return;
2284 }
2285
2286
2287 static void
2288 ixgb_enable_vlans(struct adapter * adapter)
2289 {
2290         uint32_t        ctrl;
2291
2292         ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2293         ctrl |= IXGB_CTRL0_VME;
2294         IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2295
2296         return;
2297 }
2298
2299
2300 static void
2301 ixgb_enable_intr(struct adapter * adapter)
2302 {
2303         IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2304                             IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2305         return;
2306 }
2307
2308 static void
2309 ixgb_disable_intr(struct adapter * adapter)
2310 {
2311         IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2312         return;
2313 }
2314
2315 void
2316 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2317                    uint32_t reg,
2318                    uint16_t * value)
2319 {
2320         pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2321                          *value, 2);
2322 }
2323
/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 *  Reads every hardware statistics register and accumulates it into
 *  the driver's running totals.  The "+=" accumulation assumes the
 *  registers are clear-on-read -- NOTE(review): confirm against the
 *  82597EX datasheet.
 *
 **********************************************************************/
static void
ixgb_update_stats_counters(struct adapter * adapter)
{

        /* Receive-side counters. */
        adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
        adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
        adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
        adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
        adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
        adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
        adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
        adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
        adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
        adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);

        /* Error, flow-control and transmit counters. */
        adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
        adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
        adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
        adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
        adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
        adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
        adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
        adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
        adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
        adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
        adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
        adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
        adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
        adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
        adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
        adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
        adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
        adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
        adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
        adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
        adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
        adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
        adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
        adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
        adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
        adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
        adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);

        /* Remaining unicast/VLAN/jumbo and miscellaneous counters. */
        adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
        adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
        adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
        adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
        adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
        adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
        adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
        adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
        adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
        adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
        adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
        adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
        adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
        adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
        adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
        adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
        adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
        adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
        adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
        adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
        adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
        adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
}
2395
2396 static uint64_t
2397 ixgb_get_counter(struct ifnet *ifp, ift_counter cnt)
2398 {
2399         struct adapter *adapter;
2400
2401         adapter = if_getsoftc(ifp);
2402
2403         switch (cnt) {
2404         case IFCOUNTER_IPACKETS:
2405                 return (adapter->stats.gprcl);
2406         case IFCOUNTER_OPACKETS:
2407                 return ( adapter->stats.gptcl);
2408         case IFCOUNTER_IBYTES:
2409                 return (adapter->stats.gorcl);
2410         case IFCOUNTER_OBYTES:
2411                 return (adapter->stats.gotcl);
2412         case IFCOUNTER_IMCASTS:
2413                 return ( adapter->stats.mprcl);
2414         case IFCOUNTER_COLLISIONS:
2415                 return (0);
2416         case IFCOUNTER_IERRORS:
2417                 return (adapter->dropped_pkts + adapter->stats.crcerrs +
2418                     adapter->stats.rnbc + adapter->stats.mpc +
2419                     adapter->stats.rlec);
2420         default:
2421                 return (if_get_counter_default(ifp, cnt));
2422         }
2423 }
2424
2425 /**********************************************************************
2426  *
2427  *  This routine is called only when ixgb_display_debug_stats is enabled.
2428  *  This routine provides a way to take a look at important statistics
2429  *  maintained by the driver and hardware.
2430  *
2431  **********************************************************************/
2432 static void
2433 ixgb_print_hw_stats(struct adapter * adapter)
2434 {
2435         char            buf_speed[100], buf_type[100];
2436         ixgb_bus_speed  bus_speed;
2437         ixgb_bus_type   bus_type;
2438         device_t dev;
2439
2440         dev = adapter->dev;
2441 #ifdef _SV_
2442         device_printf(dev, "Packets not Avail = %ld\n",
2443                adapter->no_pkts_avail);
2444         device_printf(dev, "CleanTxInterrupts = %ld\n",
2445                adapter->clean_tx_interrupts);
2446         device_printf(dev, "ICR RXDMT0 = %lld\n",
2447                (long long)adapter->sv_stats.icr_rxdmt0);
2448         device_printf(dev, "ICR RXO = %lld\n",
2449                (long long)adapter->sv_stats.icr_rxo);
2450         device_printf(dev, "ICR RXT0 = %lld\n",
2451                (long long)adapter->sv_stats.icr_rxt0);
2452         device_printf(dev, "ICR TXDW = %lld\n",
2453                (long long)adapter->sv_stats.icr_TXDW);
2454 #endif                          /* _SV_ */
2455
2456         bus_speed = adapter->hw.bus.speed;
2457         bus_type = adapter->hw.bus.type;
2458         sprintf(buf_speed,
2459                 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2460                 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2461                 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2462                 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2463                 "UNKNOWN");
2464         device_printf(dev, "PCI_Bus_Speed = %s\n",
2465                buf_speed);
2466
2467         sprintf(buf_type,
2468                 bus_type == ixgb_bus_type_pci ? "PCI" :
2469                 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2470                 "UNKNOWN");
2471         device_printf(dev, "PCI_Bus_Type = %s\n",
2472                buf_type);
2473
2474         device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2475                adapter->no_tx_desc_avail1);
2476         device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2477                adapter->no_tx_desc_avail2);
2478         device_printf(dev, "Std Mbuf Failed = %ld\n",
2479                adapter->mbuf_alloc_failed);
2480         device_printf(dev, "Std Cluster Failed = %ld\n",
2481                adapter->mbuf_cluster_failed);
2482
2483         device_printf(dev, "Defer count = %lld\n",
2484                (long long)adapter->stats.dc);
2485         device_printf(dev, "Missed Packets = %lld\n",
2486                (long long)adapter->stats.mpc);
2487         device_printf(dev, "Receive No Buffers = %lld\n",
2488                (long long)adapter->stats.rnbc);
2489         device_printf(dev, "Receive length errors = %lld\n",
2490                (long long)adapter->stats.rlec);
2491         device_printf(dev, "Crc errors = %lld\n",
2492                (long long)adapter->stats.crcerrs);
2493         device_printf(dev, "Driver dropped packets = %ld\n",
2494                adapter->dropped_pkts);
2495
2496         device_printf(dev, "XON Rcvd = %lld\n",
2497                (long long)adapter->stats.xonrxc);
2498         device_printf(dev, "XON Xmtd = %lld\n",
2499                (long long)adapter->stats.xontxc);
2500         device_printf(dev, "XOFF Rcvd = %lld\n",
2501                (long long)adapter->stats.xoffrxc);
2502         device_printf(dev, "XOFF Xmtd = %lld\n",
2503                (long long)adapter->stats.xofftxc);
2504
2505         device_printf(dev, "Good Packets Rcvd = %lld\n",
2506                (long long)adapter->stats.gprcl);
2507         device_printf(dev, "Good Packets Xmtd = %lld\n",
2508                (long long)adapter->stats.gptcl);
2509
2510         device_printf(dev, "Jumbo frames recvd = %lld\n",
2511                (long long)adapter->stats.jprcl);
2512         device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2513                (long long)adapter->stats.jptcl);
2514
2515         return;
2516
2517 }
2518
2519 static int
2520 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2521 {
2522         int             error;
2523         int             result;
2524         struct adapter *adapter;
2525
2526         result = -1;
2527         error = sysctl_handle_int(oidp, &result, 0, req);
2528
2529         if (error || !req->newptr)
2530                 return (error);
2531
2532         if (result == 1) {
2533                 adapter = (struct adapter *) arg1;
2534                 ixgb_print_hw_stats(adapter);
2535         }
2536         return error;
2537 }