1 /*******************************************************************************
2
3 Copyright (c) 2001-2004, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD$*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/ixgb/if_ixgb.h>
41
42 /*********************************************************************
43  *  Set this to one to display debug statistics
44  *********************************************************************/
45 int             ixgb_display_debug_stats = 0;
46
47 /*********************************************************************
48  *  Linked list of board private structures for all NICs found
49  *********************************************************************/
50
51 struct adapter *ixgb_adapter_list = NULL;
52
53
54
55 /*********************************************************************
56  *  Driver version
57  *********************************************************************/
58
59 char            ixgb_driver_version[] = "1.0.6";
60 char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
61
62 /*********************************************************************
63  *  PCI Device ID Table
64  *
65  *  Used by probe to select devices to load on
66  *  Last field stores an index into ixgb_strings
67  *  Last entry must be all 0s
68  *
69  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70  *********************************************************************/
71
72 static ixgb_vendor_info_t ixgb_vendor_info_array[] =
73 {
74         /* Intel(R) PRO/10000 Network Connection */
75         {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76         {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77         /* required last entry */
78         {0, 0, 0, 0, 0}
79 };
80
81 /*********************************************************************
82  *  Table of branding strings for all supported NICs.
83  *********************************************************************/
84
85 static char    *ixgb_strings[] = {
86         "Intel(R) PRO/10GbE Network Driver"
87 };
88
89 /*********************************************************************
90  *  Function prototypes
91  *********************************************************************/
92 static int      ixgb_probe(device_t);
93 static int      ixgb_attach(device_t);
94 static int      ixgb_detach(device_t);
95 static int      ixgb_shutdown(device_t);
96 static void     ixgb_intr(void *);
97 static void     ixgb_start(struct ifnet *);
98 static void     ixgb_start_locked(struct ifnet *);
99 static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static uint64_t ixgb_get_counter(struct ifnet *, ift_counter);
101 static void     ixgb_watchdog(struct adapter *);
102 static void     ixgb_init(void *);
103 static void     ixgb_init_locked(struct adapter *);
104 static void     ixgb_stop(void *);
105 static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
106 static int      ixgb_media_change(struct ifnet *);
107 static void     ixgb_identify_hardware(struct adapter *);
108 static int      ixgb_allocate_pci_resources(struct adapter *);
109 static void     ixgb_free_pci_resources(struct adapter *);
110 static void     ixgb_local_timer(void *);
111 static int      ixgb_hardware_init(struct adapter *);
112 static int      ixgb_setup_interface(device_t, struct adapter *);
113 static int      ixgb_setup_transmit_structures(struct adapter *);
114 static void     ixgb_initialize_transmit_unit(struct adapter *);
115 static int      ixgb_setup_receive_structures(struct adapter *);
116 static void     ixgb_initialize_receive_unit(struct adapter *);
117 static void     ixgb_enable_intr(struct adapter *);
118 static void     ixgb_disable_intr(struct adapter *);
119 static void     ixgb_free_transmit_structures(struct adapter *);
120 static void     ixgb_free_receive_structures(struct adapter *);
121 static void     ixgb_update_stats_counters(struct adapter *);
122 static void     ixgb_clean_transmit_interrupts(struct adapter *);
123 static int      ixgb_allocate_receive_structures(struct adapter *);
124 static int      ixgb_allocate_transmit_structures(struct adapter *);
125 static int      ixgb_process_receive_interrupts(struct adapter *, int);
126 static void 
127 ixgb_receive_checksum(struct adapter *,
128                       struct ixgb_rx_desc * rx_desc,
129                       struct mbuf *);
130 static void 
131 ixgb_transmit_checksum_setup(struct adapter *,
132                              struct mbuf *,
133                              u_int8_t *);
134 static void     ixgb_set_promisc(struct adapter *);
135 static void     ixgb_disable_promisc(struct adapter *);
136 static void     ixgb_set_multi(struct adapter *);
137 static void     ixgb_print_hw_stats(struct adapter *);
138 static void     ixgb_print_link_status(struct adapter *);
139 static int 
140 ixgb_get_buf(int i, struct adapter *,
141              struct mbuf *);
142 static void     ixgb_enable_vlans(struct adapter * adapter);
143 static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
144 static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
145 static int 
146 ixgb_dma_malloc(struct adapter *, bus_size_t,
147                 struct ixgb_dma_alloc *, int);
148 static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
149 #ifdef DEVICE_POLLING
150 static poll_handler_t ixgb_poll;
151 #endif
152
153 /*********************************************************************
154  *  FreeBSD Device Interface Entry Points
155  *********************************************************************/
156
157 static device_method_t ixgb_methods[] = {
158         /* Device interface */
159         DEVMETHOD(device_probe, ixgb_probe),
160         DEVMETHOD(device_attach, ixgb_attach),
161         DEVMETHOD(device_detach, ixgb_detach),
162         DEVMETHOD(device_shutdown, ixgb_shutdown),
163
164         DEVMETHOD_END
165 };
166
167 static driver_t ixgb_driver = {
168         "ixgb", ixgb_methods, sizeof(struct adapter),
169 };
170
171 static devclass_t ixgb_devclass;
172 DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
173
174 MODULE_DEPEND(ixgb, pci, 1, 1, 1);
175 MODULE_DEPEND(ixgb, ether, 1, 1, 1);
176
177 /* some defines for controlling descriptor fetches in h/w */
178 #define RXDCTL_PTHRESH_DEFAULT 128      /* chip considers prefetch below this */
179 #define RXDCTL_HTHRESH_DEFAULT 16       /* chip will only prefetch if tail is
180                                          * pushed this many descriptors from
181                                          * head */
182 #define RXDCTL_WTHRESH_DEFAULT 0        /* chip writes back at this many or RXT0 */
183
184
185 /*********************************************************************
186  *  Device identification routine
187  *
188  *  ixgb_probe determines whether the driver should be loaded for the
189  *  adapter, based on the adapter's PCI vendor/device ID.
190  *
191  *  return 0 on success, positive on failure
192  *********************************************************************/
193
194 static int
195 ixgb_probe(device_t dev)
196 {
197         ixgb_vendor_info_t *ent;
198
199         u_int16_t       pci_vendor_id = 0;
200         u_int16_t       pci_device_id = 0;
201         u_int16_t       pci_subvendor_id = 0;
202         u_int16_t       pci_subdevice_id = 0;
203         char            adapter_name[60];
204
205         INIT_DEBUGOUT("ixgb_probe: begin");
206
207         pci_vendor_id = pci_get_vendor(dev);
208         if (pci_vendor_id != IXGB_VENDOR_ID)
209                 return (ENXIO);
210
211         pci_device_id = pci_get_device(dev);
212         pci_subvendor_id = pci_get_subvendor(dev);
213         pci_subdevice_id = pci_get_subdevice(dev);
214
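        /* Walk the device table for a matching vendor/device/subsystem ID */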
215         ent = ixgb_vendor_info_array;
216         while (ent->vendor_id != 0) {
217                 if ((pci_vendor_id == ent->vendor_id) &&
218                     (pci_device_id == ent->device_id) &&
219
220                     ((pci_subvendor_id == ent->subvendor_id) ||
221                      (ent->subvendor_id == PCI_ANY_ID)) &&
222
223                     ((pci_subdevice_id == ent->subdevice_id) ||
224                      (ent->subdevice_id == PCI_ANY_ID))) {
225                         sprintf(adapter_name, "%s, Version - %s",
226                                 ixgb_strings[ent->index],
227                                 ixgb_driver_version);
228                         device_set_desc_copy(dev, adapter_name);
229                         return (BUS_PROBE_DEFAULT);
230                 }
231                 ent++;
232         }
233
234         return (ENXIO);
235 }
236
237 /*********************************************************************
238  *  Device initialization routine
239  *
240  *  The attach entry point is called when the driver is being loaded.
241  *  This routine identifies the type of hardware, allocates all resources
242  *  and initializes the hardware.
243  *
244  *  return 0 on success, positive on failure
245  *********************************************************************/
246
247 static int
248 ixgb_attach(device_t dev)
249 {
250         struct adapter *adapter;
251         int             tsize, rsize;
252         int             error = 0;
253
254         device_printf(dev, "%s\n", ixgb_copyright);
255         INIT_DEBUGOUT("ixgb_attach: begin");
256
257         /* Allocate, clear, and link in our adapter structure */
258         if (!(adapter = device_get_softc(dev))) {
259                 device_printf(dev, "adapter structure allocation failed\n");
260                 return (ENOMEM);
261         }
262         bzero(adapter, sizeof(struct adapter));
263         adapter->dev = dev;
264         adapter->osdep.dev = dev;
265         IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
266
267         if (ixgb_adapter_list != NULL)
268                 ixgb_adapter_list->prev = adapter;
269         adapter->next = ixgb_adapter_list;
270         ixgb_adapter_list = adapter;
271
272         /* SYSCTL APIs */
273         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
274                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
275                         OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
276                         (void *)adapter, 0,
277                         ixgb_sysctl_stats, "I", "Statistics");
278
279         callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
280
281         /* Determine hardware revision */
282         ixgb_identify_hardware(adapter);
283
284         /* Parameters (to be read from user) */
285         adapter->num_tx_desc = IXGB_MAX_TXD;
286         adapter->num_rx_desc = IXGB_MAX_RXD;
287         adapter->tx_int_delay = TIDV;
288         adapter->rx_int_delay = RDTR;
289         adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
290
291         adapter->hw.fc.high_water = FCRTH;
292         adapter->hw.fc.low_water = FCRTL;
293         adapter->hw.fc.pause_time = FCPAUSE;
294         adapter->hw.fc.send_xon = TRUE;
295         adapter->hw.fc.type = FLOW_CONTROL;
296
297
298         /* Set the max frame size assuming standard ethernet sized frames */
299         adapter->hw.max_frame_size =
300                 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
301
302         if (ixgb_allocate_pci_resources(adapter)) {
303                 device_printf(dev, "Allocation of PCI resources failed\n");
304                 error = ENXIO;
305                 goto err_pci;
306         }
307         tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
308                              sizeof(struct ixgb_tx_desc), 4096);
309
310         /* Allocate Transmit Descriptor ring */
311         if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
312                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
313                 error = ENOMEM;
314                 goto err_tx_desc;
315         }
316         adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
317
318         rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
319                              sizeof(struct ixgb_rx_desc), 4096);
320
321         /* Allocate Receive Descriptor ring */
322         if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
323                 device_printf(dev, "Unable to allocate rx_desc memory\n");
324                 error = ENOMEM;
325                 goto err_rx_desc;
326         }
327         adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
328
329         /* Allocate multicast array memory. */
330         adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
331             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
332         if (adapter->mta == NULL) {
333                 device_printf(dev, "Can not allocate multicast setup array\n");
334                 error = ENOMEM;
335                 goto err_hw_init;
336         }
337
338         /* Initialize the hardware */
339         if (ixgb_hardware_init(adapter)) {
340                 device_printf(dev, "Unable to initialize the hardware\n");
341                 error = EIO;
342                 goto err_hw_init;
343         }
344         /* Setup OS specific network interface */
345         if (ixgb_setup_interface(dev, adapter) != 0) {
346                 error = ENOMEM;
                goto err_hw_init;
        }
347
348         /* Initialize statistics */
349         ixgb_clear_hw_cntrs(&adapter->hw);
350         ixgb_update_stats_counters(adapter);
351
352         INIT_DEBUGOUT("ixgb_attach: end");
353         return (0);
354
355 err_hw_init:
356         ixgb_dma_free(adapter, &adapter->rxdma);
357 err_rx_desc:
358         ixgb_dma_free(adapter, &adapter->txdma);
359 err_tx_desc:
360 err_pci:
361         if (adapter->ifp != NULL)
362                 if_free(adapter->ifp);
363         ixgb_free_pci_resources(adapter);
364         sysctl_ctx_free(&adapter->sysctl_ctx);
365         free(adapter->mta, M_DEVBUF);
366         return (error);
367
368 }
369
370 /*********************************************************************
371  *  Device removal routine
372  *
373  *  The detach entry point is called when the driver is being removed.
374  *  This routine stops the adapter and deallocates all the resources
375  *  that were allocated for driver operation.
376  *
377  *  return 0 on success, positive on failure
378  *********************************************************************/
379
380 static int
381 ixgb_detach(device_t dev)
382 {
383         struct adapter *adapter = device_get_softc(dev);
384         struct ifnet   *ifp = adapter->ifp;
385
386         INIT_DEBUGOUT("ixgb_detach: begin");
387
388 #ifdef DEVICE_POLLING
389         if (ifp->if_capenable & IFCAP_POLLING)
390                 ether_poll_deregister(ifp);
391 #endif
392
393         IXGB_LOCK(adapter);
394         adapter->in_detach = 1;
395
396         ixgb_stop(adapter);
397         IXGB_UNLOCK(adapter);
398
399 #if __FreeBSD_version < 500000
400         ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
401 #else
402         ether_ifdetach(ifp);
403 #endif
404         callout_drain(&adapter->timer);
405         ixgb_free_pci_resources(adapter);
406 #if __FreeBSD_version >= 500000
407         if_free(ifp);
408 #endif
409
410         /* Free Transmit Descriptor ring */
411         if (adapter->tx_desc_base) {
412                 ixgb_dma_free(adapter, &adapter->txdma);
413                 adapter->tx_desc_base = NULL;
414         }
415         /* Free Receive Descriptor ring */
416         if (adapter->rx_desc_base) {
417                 ixgb_dma_free(adapter, &adapter->rxdma);
418                 adapter->rx_desc_base = NULL;
419         }
420         /* Remove from the adapter list */
421         if (ixgb_adapter_list == adapter)
422                 ixgb_adapter_list = adapter->next;
423         if (adapter->next != NULL)
424                 adapter->next->prev = adapter->prev;
425         if (adapter->prev != NULL)
426                 adapter->prev->next = adapter->next;
427         free(adapter->mta, M_DEVBUF);
428
429         IXGB_LOCK_DESTROY(adapter);
430         return (0);
431 }
432
433 /*********************************************************************
434  *
435  *  Shutdown entry point
436  *
437  **********************************************************************/
438
439 static int
440 ixgb_shutdown(device_t dev)
441 {
442         struct adapter *adapter = device_get_softc(dev);
443         IXGB_LOCK(adapter);
444         ixgb_stop(adapter);
445         IXGB_UNLOCK(adapter);
446         return (0);
447 }
448
449
450 /*********************************************************************
451  *  Transmit entry point
452  *
453  *  ixgb_start is called by the stack to initiate a transmit.
454  *  The driver will remain in this routine as long as there are
455  *  packets to transmit and transmit resources are available.
456  *  In case resources are not available, the stack is notified and
457  *  the packet is requeued.
458  **********************************************************************/
459
460 static void
461 ixgb_start_locked(struct ifnet * ifp)
462 {
463         struct mbuf    *m_head;
464         struct adapter *adapter = ifp->if_softc;
465
466         IXGB_LOCK_ASSERT(adapter);
467
468         if (!adapter->link_active)
469                 return;
470
471         while (ifp->if_snd.ifq_head != NULL) {
472                 IF_DEQUEUE(&ifp->if_snd, m_head);
473
474                 if (m_head == NULL)
475                         break;
476
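                /* Out of TX descriptors: mark the queue blocked and requeue the packet */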
477                 if (ixgb_encap(adapter, m_head)) {
478                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
479                         IF_PREPEND(&ifp->if_snd, m_head);
480                         break;
481                 }
482                 /* Send a copy of the frame to the BPF listener */
483 #if __FreeBSD_version < 500000
484                 if (ifp->if_bpf)
485                         bpf_mtap(ifp, m_head);
486 #else
487                 ETHER_BPF_MTAP(ifp, m_head);
488 #endif
489                 /* Set timeout in case hardware has problems transmitting */
490                 adapter->tx_timer = IXGB_TX_TIMEOUT;
491
492         }
493         return;
494 }
495
496 static void
497 ixgb_start(struct ifnet *ifp)
498 {
499         struct adapter *adapter = ifp->if_softc;
500
501         IXGB_LOCK(adapter);
502         ixgb_start_locked(ifp);
503         IXGB_UNLOCK(adapter);
504         return;
505 }
506
507 /*********************************************************************
508  *  Ioctl entry point
509  *
510  *  ixgb_ioctl is called when the user wants to configure the
511  *  interface.
512  *
513  *  return 0 on success, positive on failure
514  **********************************************************************/
515
516 static int
517 ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
518 {
519         int             mask, error = 0;
520         struct ifreq   *ifr = (struct ifreq *) data;
521         struct adapter *adapter = ifp->if_softc;
522
523         if (adapter->in_detach)
524                 goto out;
525
526         switch (command) {
527         case SIOCSIFADDR:
528         case SIOCGIFADDR:
529                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
530                 ether_ioctl(ifp, command, data);
531                 break;
532         case SIOCSIFMTU:
533                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
534                 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
535                         error = EINVAL;
536                 } else {
537                         IXGB_LOCK(adapter);
538                         ifp->if_mtu = ifr->ifr_mtu;
539                         adapter->hw.max_frame_size =
540                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
541
542                         ixgb_init_locked(adapter);
543                         IXGB_UNLOCK(adapter);
544                 }
545                 break;
546         case SIOCSIFFLAGS:
547                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
548                 IXGB_LOCK(adapter);
549                 if (ifp->if_flags & IFF_UP) {
550                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
551                                 ixgb_init_locked(adapter);
552                         }
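                        /* Re-sync promiscuous/allmulti state with the current interface flags */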
553                         ixgb_disable_promisc(adapter);
554                         ixgb_set_promisc(adapter);
555                 } else {
556                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
557                                 ixgb_stop(adapter);
558                         }
559                 }
560                 IXGB_UNLOCK(adapter);
561                 break;
562         case SIOCADDMULTI:
563         case SIOCDELMULTI:
564                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
565                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
566                         IXGB_LOCK(adapter);
567                         ixgb_disable_intr(adapter);
568                         ixgb_set_multi(adapter);
569                         ixgb_enable_intr(adapter);
570                         IXGB_UNLOCK(adapter);
571                 }
572                 break;
573         case SIOCSIFMEDIA:
574         case SIOCGIFMEDIA:
575                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
576                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
577                 break;
578         case SIOCSIFCAP:
579                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
580                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
581 #ifdef DEVICE_POLLING
582                 if (mask & IFCAP_POLLING) {
583                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
584                                 error = ether_poll_register(ixgb_poll, ifp);
585                                 if (error)
586                                         return(error);
587                                 IXGB_LOCK(adapter);
588                                 ixgb_disable_intr(adapter);
589                                 ifp->if_capenable |= IFCAP_POLLING;
590                                 IXGB_UNLOCK(adapter);
591                         } else {
592                                 error = ether_poll_deregister(ifp);
593                                 /* Enable interrupt even in error case */
594                                 IXGB_LOCK(adapter);
595                                 ixgb_enable_intr(adapter);
596                                 ifp->if_capenable &= ~IFCAP_POLLING;
597                                 IXGB_UNLOCK(adapter);
598                         }
599                 }
600 #endif /* DEVICE_POLLING */
601                 if (mask & IFCAP_HWCSUM) {
602                         if (IFCAP_HWCSUM & ifp->if_capenable)
603                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
604                         else
605                                 ifp->if_capenable |= IFCAP_HWCSUM;
606                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
607                                 ixgb_init(adapter);
608                 }
609                 break;
610         default:
611                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
612                 error = EINVAL;
613         }
614
615 out:
616         return (error);
617 }
618
619 /*********************************************************************
620  *  Watchdog entry point
621  *
622  *  This routine is called whenever the hardware quits transmitting.
623  *
624  **********************************************************************/
625
626 static void
627 ixgb_watchdog(struct adapter *adapter)
628 {
629         struct ifnet *ifp;
630
631         ifp = adapter->ifp;
632
633         /*
634          * If we are in this routine because of pause frames, then don't
635          * reset the hardware.
636          */
637         if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
638                 adapter->tx_timer = IXGB_TX_TIMEOUT;
639                 return;
640         }
641         if_printf(ifp, "watchdog timeout -- resetting\n");
642
643         ixgb_stop(adapter);
644         ixgb_init_locked(adapter);
645
646
647         if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
648
649         return;
650 }
651
652 /*********************************************************************
653  *  Init entry point
654  *
655  *  This routine is used in two ways. It is used by the stack as
656  *  the init entry point in the network interface structure. It is
657  *  also used by the driver as a hw/sw initialization routine to get
658  *  to a consistent state.
659  *
661  **********************************************************************/
662
663 static void
664 ixgb_init_locked(struct adapter *adapter)
665 {
666         struct ifnet   *ifp;
667
668         INIT_DEBUGOUT("ixgb_init: begin");
669
670         IXGB_LOCK_ASSERT(adapter);
671
672         ixgb_stop(adapter);
673         ifp = adapter->ifp;
674
675         /* Get the latest mac address, User can use a LAA */
676         bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
677             IXGB_ETH_LENGTH_OF_ADDRESS);
678
679         /* Initialize the hardware */
680         if (ixgb_hardware_init(adapter)) {
681                 if_printf(ifp, "Unable to initialize the hardware\n");
682                 return;
683         }
684         ixgb_enable_vlans(adapter);
685
686         /* Prepare transmit descriptors and buffers */
687         if (ixgb_setup_transmit_structures(adapter)) {
688                 if_printf(ifp, "Could not setup transmit structures\n");
689                 ixgb_stop(adapter);
690                 return;
691         }
692         ixgb_initialize_transmit_unit(adapter);
693
694         /* Setup Multicast table */
695         ixgb_set_multi(adapter);
696
697         /* Prepare receive descriptors and buffers */
698         if (ixgb_setup_receive_structures(adapter)) {
699                 if_printf(ifp, "Could not setup receive structures\n");
700                 ixgb_stop(adapter);
701                 return;
702         }
703         ixgb_initialize_receive_unit(adapter);
704
705         /* Don't lose promiscuous settings */
706         ixgb_set_promisc(adapter);
707
708         ifp = adapter->ifp;
709         ifp->if_drv_flags |= IFF_DRV_RUNNING;
710         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
711
712
713         if (ifp->if_capenable & IFCAP_TXCSUM)
714                 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
715         else
716                 ifp->if_hwassist = 0;
717
718
719         /* Enable jumbo frames */
720         if (ifp->if_mtu > ETHERMTU) {
721                 uint32_t        temp_reg;
722                 IXGB_WRITE_REG(&adapter->hw, MFS,
723                                adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
724                 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
725                 temp_reg |= IXGB_CTRL0_JFE;
726                 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
727         }
728         callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
729         ixgb_clear_hw_cntrs(&adapter->hw);
730 #ifdef DEVICE_POLLING
731         /*
732          * Only disable interrupts if we are polling, make sure they are on
733          * otherwise.
734          */
735         if (ifp->if_capenable & IFCAP_POLLING)
736                 ixgb_disable_intr(adapter);
737         else
738 #endif
739                 ixgb_enable_intr(adapter);
740
741         return;
742 }
743
744 static void
745 ixgb_init(void *arg)
746 {
747         struct adapter *adapter = arg;
748
749         IXGB_LOCK(adapter);
750         ixgb_init_locked(adapter);
751         IXGB_UNLOCK(adapter);
752         return;
753 }
754
755 #ifdef DEVICE_POLLING
756 static int
757 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
758 {
759         struct adapter *adapter = ifp->if_softc;
760         u_int32_t       reg_icr;
761         int             rx_npkts;
762
763         IXGB_LOCK_ASSERT(adapter);
764
765         if (cmd == POLL_AND_CHECK_STATUS) {
766                 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
767                 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
768                         ixgb_check_for_link(&adapter->hw);
769                         ixgb_print_link_status(adapter);
770                 }
771         }
772         rx_npkts = ixgb_process_receive_interrupts(adapter, count);
773         ixgb_clean_transmit_interrupts(adapter);
774
775         if (ifp->if_snd.ifq_head != NULL)
776                 ixgb_start_locked(ifp);
777         return (rx_npkts);
778 }
779
780 static int
781 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
782 {
783         struct adapter *adapter = ifp->if_softc;
784         int rx_npkts = 0;
785
786         IXGB_LOCK(adapter);
787         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
788                 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
789         IXGB_UNLOCK(adapter);
790         return (rx_npkts);
791 }
792 #endif /* DEVICE_POLLING */
793
794 /*********************************************************************
795  *
796  *  Interrupt Service routine
797  *
798  **********************************************************************/
799
800 static void
801 ixgb_intr(void *arg)
802 {
803         u_int32_t       loop_cnt = IXGB_MAX_INTR;
804         u_int32_t       reg_icr;
805         struct ifnet   *ifp;
806         struct adapter *adapter = arg;
807         boolean_t       rxdmt0 = FALSE;
808
809         IXGB_LOCK(adapter);
810
811         ifp = adapter->ifp;
812
813 #ifdef DEVICE_POLLING
814         if (ifp->if_capenable & IFCAP_POLLING) {
815                 IXGB_UNLOCK(adapter);
816                 return;
817         }
818 #endif
819
820         reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
821         if (reg_icr == 0) {
822                 IXGB_UNLOCK(adapter);
823                 return;
824         }
825
826         if (reg_icr & IXGB_INT_RXDMT0)
827                 rxdmt0 = TRUE;
828
829 #ifdef _SV_
830         if (reg_icr & IXGB_INT_RXDMT0)
831                 adapter->sv_stats.icr_rxdmt0++;
832         if (reg_icr & IXGB_INT_RXO)
833                 adapter->sv_stats.icr_rxo++;
834         if (reg_icr & IXGB_INT_RXT0)
835                 adapter->sv_stats.icr_rxt0++;
836         if (reg_icr & IXGB_INT_TXDW)
837                 adapter->sv_stats.icr_TXDW++;
838 #endif                          /* _SV_ */
839
840         /* Link status change */
841         if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
842                 ixgb_check_for_link(&adapter->hw);
843                 ixgb_print_link_status(adapter);
844         }
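        /* Service RX and reclaim TX descriptors for up to IXGB_MAX_INTR passes */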
845         while (loop_cnt > 0) {
846                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
847                         ixgb_process_receive_interrupts(adapter, -1);
848                         ixgb_clean_transmit_interrupts(adapter);
849                 }
850                 loop_cnt--;
851         }
852
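        /* If the RX low-threshold interrupt fired while RAIDC is in use, mask and re-enable RXDMT0 */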
853         if (rxdmt0 && adapter->raidc) {
854                 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
855                 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
856         }
857         if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
858                 ixgb_start_locked(ifp);
859
860         IXGB_UNLOCK(adapter);
861         return;
862 }
863
864
865 /*********************************************************************
866  *
867  *  Media Ioctl callback
868  *
869  *  This routine is called whenever the user queries the status of
870  *  the interface using ifconfig.
871  *
872  **********************************************************************/
873 static void
874 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
875 {
876         struct adapter *adapter = ifp->if_softc;
877
878         INIT_DEBUGOUT("ixgb_media_status: begin");
879
880         ixgb_check_for_link(&adapter->hw);
881         ixgb_print_link_status(adapter);
882
883         ifmr->ifm_status = IFM_AVALID;
884         ifmr->ifm_active = IFM_ETHER;
885
886         if (!adapter->hw.link_up)
887                 return;
888
889         ifmr->ifm_status |= IFM_ACTIVE;
890         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
891
892         return;
893 }
894
895 /*********************************************************************
896  *
897  *  Media Ioctl callback
898  *
899  *  This routine is called when the user changes speed/duplex using
900  *  media/mediaopt options with ifconfig.
901  *
902  **********************************************************************/
903 static int
904 ixgb_media_change(struct ifnet * ifp)
905 {
906         struct adapter *adapter = ifp->if_softc;
907         struct ifmedia *ifm = &adapter->media;
908
909         INIT_DEBUGOUT("ixgb_media_change: begin");
910
911         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
912                 return (EINVAL);
913
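        /* The media on this adapter is fixed; just validate the request */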
914         return (0);
915 }
916
917 /*********************************************************************
918  *
919  *  This routine maps the mbufs to tx descriptors.
920  *
921  *  return 0 on success, positive on failure
922  **********************************************************************/
923
924 static int
925 ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
926 {
927         u_int8_t        txd_popts;
928         int             i, j, error, nsegs;
929
930 #if __FreeBSD_version < 500000
931         struct ifvlan  *ifv = NULL;
932 #endif
933         bus_dma_segment_t segs[IXGB_MAX_SCATTER];
934         bus_dmamap_t    map;
935         struct ixgb_buffer *tx_buffer = NULL;
936         struct ixgb_tx_desc *current_tx_desc = NULL;
937         struct ifnet   *ifp = adapter->ifp;
938
939         /*
940          * Force a cleanup if number of TX descriptors available hits the
941          * threshold
942          */
943         if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
944                 ixgb_clean_transmit_interrupts(adapter);
945         }
946         if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
947                 adapter->no_tx_desc_avail1++;
948                 return (ENOBUFS);
949         }
950         /*
951          * Map the packet for DMA.
952          */
953         if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
954                 adapter->no_tx_map_avail++;
955                 return (ENOMEM);
956         }
957         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
958                                         &nsegs, BUS_DMA_NOWAIT);
959         if (error != 0) {
960                 adapter->no_tx_dma_setup++;
961                 if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
962                        "error %u\n", error);
963                 bus_dmamap_destroy(adapter->txtag, map);
964                 return (error);
965         }
966         KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
967
968         if (nsegs > adapter->num_tx_desc_avail) {
969                 adapter->no_tx_desc_avail2++;
970                 bus_dmamap_destroy(adapter->txtag, map);
971                 return (ENOBUFS);
972         }
973         if (ifp->if_hwassist > 0) {
974                 ixgb_transmit_checksum_setup(adapter, m_head,
975                                              &txd_popts);
976         } else
977                 txd_popts = 0;
978
979         /* Find out if we are in vlan mode */
980 #if __FreeBSD_version < 500000
981         if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
982             m_head->m_pkthdr.rcvif != NULL &&
983             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
984                 ifv = m_head->m_pkthdr.rcvif->if_softc;
985 #elif __FreeBSD_version < 700000
986         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
987 #endif
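        /* Fill one TX descriptor per DMA segment, wrapping at the end of the ring */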
988         i = adapter->next_avail_tx_desc;
989         for (j = 0; j < nsegs; j++) {
990                 tx_buffer = &adapter->tx_buffer_area[i];
991                 current_tx_desc = &adapter->tx_desc_base[i];
992
993                 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
994                 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
995                 current_tx_desc->popts = txd_popts;
996                 if (++i == adapter->num_tx_desc)
997                         i = 0;
998
999                 tx_buffer->m_head = NULL;
1000         }
1001
1002         adapter->num_tx_desc_avail -= nsegs;
1003         adapter->next_avail_tx_desc = i;
1004
1005 #if __FreeBSD_version < 500000
1006         if (ifv != NULL) {
1007                 /* Set the vlan id */
1008                 current_tx_desc->vlan = ifv->ifv_tag;
1009 #elif __FreeBSD_version < 700000
1010         if (mtag != NULL) {
1011                 /* Set the vlan id */
1012                 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1013 #else
1014         if (m_head->m_flags & M_VLANTAG) {
1015                 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1016 #endif
1017
1018                 /* Tell hardware to add tag */
1019                 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1020         }
1021         tx_buffer->m_head = m_head;
1022         tx_buffer->map = map;
1023         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1024
1025         /*
1026          * Last Descriptor of Packet needs End Of Packet (EOP)
1027          */
1028         current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1029
1030         /*
1031          * Advance the Transmit Descriptor Tail (TDT); this tells the
1032          * hardware that this frame is available to transmit.
1033          */
1034         IXGB_WRITE_REG(&adapter->hw, TDT, i);
1035
1036         return (0);
1037 }
1038
1039 static void
1040 ixgb_set_promisc(struct adapter * adapter)
1041 {
1042
1043         u_int32_t       reg_rctl;
1044         struct ifnet   *ifp = adapter->ifp;
1045
1046         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1047
1048         if (ifp->if_flags & IFF_PROMISC) {
1049                 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1050                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1051         } else if (ifp->if_flags & IFF_ALLMULTI) {
1052                 reg_rctl |= IXGB_RCTL_MPE;
1053                 reg_rctl &= ~IXGB_RCTL_UPE;
1054                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1055         }
1056         return;
1057 }
1058
1059 static void
1060 ixgb_disable_promisc(struct adapter * adapter)
1061 {
1062         u_int32_t       reg_rctl;
1063
1064         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1065
1066         reg_rctl &= (~IXGB_RCTL_UPE);
1067         reg_rctl &= (~IXGB_RCTL_MPE);
1068         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1069
1070         return;
1071 }
1072
1073
1074 /*********************************************************************
1075  *  Multicast Update
1076  *
1077  *  This routine is called whenever the multicast address list is updated.
1078  *
1079  **********************************************************************/
1080
1081 static void
1082 ixgb_set_multi(struct adapter * adapter)
1083 {
1084         u_int32_t       reg_rctl = 0;
1085         u_int8_t        *mta;
1086         struct ifmultiaddr *ifma;
1087         int             mcnt = 0;
1088         struct ifnet   *ifp = adapter->ifp;
1089
1090         IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1091
1092         mta = adapter->mta;
1093         bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1094             MAX_NUM_MULTICAST_ADDRESSES);
1095
1096         if_maddr_rlock(ifp);
1097 #if __FreeBSD_version < 500000
1098         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1099 #else
1100         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1101 #endif
1102                 if (ifma->ifma_addr->sa_family != AF_LINK)
1103                         continue;
1104
1105                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1106                       &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1107                 mcnt++;
1108         }
1109         if_maddr_runlock(ifp);
1110
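        /* Too many groups for the hardware filter; fall back to multicast promiscuous mode */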
1111         if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1112                 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1113                 reg_rctl |= IXGB_RCTL_MPE;
1114                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1115         } else
1116                 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1117
1118         return;
1119 }
1120
1121
1122 /*********************************************************************
1123  *  Timer routine
1124  *
1125  *  This routine checks for link status and updates statistics.
1126  *
1127  **********************************************************************/
1128
1129 static void
1130 ixgb_local_timer(void *arg)
1131 {
1132         struct ifnet   *ifp;
1133         struct adapter *adapter = arg;
1134         ifp = adapter->ifp;
1135
1136         IXGB_LOCK_ASSERT(adapter);
1137
1138         ixgb_check_for_link(&adapter->hw);
1139         ixgb_print_link_status(adapter);
1140         ixgb_update_stats_counters(adapter);
1141         if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1142                 ixgb_print_hw_stats(adapter);
1143         }
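        /* Count down the TX watchdog and invoke it when it expires */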
1144         if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1145                 ixgb_watchdog(adapter);
1146         callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
1147 }
1148
1149 static void
1150 ixgb_print_link_status(struct adapter * adapter)
1151 {
1152         if (adapter->hw.link_up) {
1153                 if (!adapter->link_active) {
1154                         if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1155                                10000,
1156                                "Full Duplex");
1157                         adapter->link_active = 1;
1158                 }
1159         } else {
1160                 if (adapter->link_active) {
1161                         if_printf(adapter->ifp, "Link is Down \n");
1162                         adapter->link_active = 0;
1163                 }
1164         }
1165
1166         return;
1167 }
1168
1169
1170
1171 /*********************************************************************
1172  *
1173  *  This routine disables all traffic on the adapter by issuing a
1174  *  global reset on the MAC and deallocates TX/RX buffers.
1175  *
1176  **********************************************************************/
1177
1178 static void
1179 ixgb_stop(void *arg)
1180 {
1181         struct ifnet   *ifp;
1182         struct adapter *adapter = arg;
1183         ifp = adapter->ifp;
1184
1185         IXGB_LOCK_ASSERT(adapter);
1186
1187         INIT_DEBUGOUT("ixgb_stop: begin\n");
1188         ixgb_disable_intr(adapter);
1189         adapter->hw.adapter_stopped = FALSE;
1190         ixgb_adapter_stop(&adapter->hw);
1191         callout_stop(&adapter->timer);
1192         ixgb_free_transmit_structures(adapter);
1193         ixgb_free_receive_structures(adapter);
1194
1195         /* Tell the stack that the interface is no longer active */
1196         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1197         adapter->tx_timer = 0;
1198
1199         return;
1200 }
1201
1202
1203 /*********************************************************************
1204  *
1205  *  Determine hardware revision.
1206  *
1207  **********************************************************************/
1208 static void
1209 ixgb_identify_hardware(struct adapter * adapter)
1210 {
1211         device_t        dev = adapter->dev;
1212
1213         /* Make sure our PCI config space has the necessary stuff set */
1214         pci_enable_busmaster(dev);
1215         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1216
1217         /* Save off the information about this board */
1218         adapter->hw.vendor_id = pci_get_vendor(dev);
1219         adapter->hw.device_id = pci_get_device(dev);
1220         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1221         adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1222         adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1223
1224         /* Set MacType, etc. based on this PCI info */
1225         switch (adapter->hw.device_id) {
1226         case IXGB_DEVICE_ID_82597EX:
1227         case IXGB_DEVICE_ID_82597EX_SR:
1228                 adapter->hw.mac_type = ixgb_82597;
1229                 break;
1230         default:
1231                 INIT_DEBUGOUT1("Unknown device id 0x%x", adapter->hw.device_id);
1232                 device_printf(dev, "unsupported device id 0x%x\n",
1233                     adapter->hw.device_id);
1234         }
1235
1236         return;
1237 }
1238
1239 static int
1240 ixgb_allocate_pci_resources(struct adapter * adapter)
1241 {
1242         int             rid;
1243         device_t        dev = adapter->dev;
1244
1245         rid = IXGB_MMBA;
1246         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1247                                                  &rid,
1248                                                  RF_ACTIVE);
1249         if (!(adapter->res_memory)) {
1250                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1251                 return (ENXIO);
1252         }
1253         adapter->osdep.mem_bus_space_tag =
1254                 rman_get_bustag(adapter->res_memory);
1255         adapter->osdep.mem_bus_space_handle =
1256                 rman_get_bushandle(adapter->res_memory);
1257         adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1258
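        /* Allocate the (shareable) interrupt line and register the handler */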
1259         rid = 0x0;
1260         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1261                                                         &rid,
1262                                                         RF_SHAREABLE | RF_ACTIVE);
1263         if (!(adapter->res_interrupt)) {
1264                 device_printf(dev,
1265                     "Unable to allocate bus resource: interrupt\n");
1266                 return (ENXIO);
1267         }
1268         if (bus_setup_intr(dev, adapter->res_interrupt,
1269                            INTR_TYPE_NET | INTR_MPSAFE,
1270                            NULL, (void (*) (void *))ixgb_intr, adapter,
1271                            &adapter->int_handler_tag)) {
1272                 device_printf(dev, "Error registering interrupt handler!\n");
1273                 return (ENXIO);
1274         }
1275         adapter->hw.back = &adapter->osdep;
1276
1277         return (0);
1278 }
1279
1280 static void
1281 ixgb_free_pci_resources(struct adapter * adapter)
1282 {
1283         device_t        dev = adapter->dev;
1284
1285         if (adapter->res_interrupt != NULL) {
1286                 bus_teardown_intr(dev, adapter->res_interrupt,
1287                                   adapter->int_handler_tag);
1288                 bus_release_resource(dev, SYS_RES_IRQ, 0,
1289                                      adapter->res_interrupt);
1290         }
1291         if (adapter->res_memory != NULL) {
1292                 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1293                                      adapter->res_memory);
1294         }
1295         if (adapter->res_ioport != NULL) {
1296                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1297                                      adapter->res_ioport);
1298         }
1299         return;
1300 }
1301
1302 /*********************************************************************
1303  *
1304  *  Initialize the hardware to a configuration as specified by the
1305  *  adapter structure. The controller is reset, the EEPROM is
1306  *  verified, the MAC address is set, then the shared initialization
1307  *  routines are called.
1308  *
1309  **********************************************************************/
1310 static int
1311 ixgb_hardware_init(struct adapter * adapter)
1312 {
1313         /* Issue a global reset */
1314         adapter->hw.adapter_stopped = FALSE;
1315         ixgb_adapter_stop(&adapter->hw);
1316
1317         /* Make sure we have a good EEPROM before we read from it */
1318         if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1319                 device_printf(adapter->dev,
1320                     "The EEPROM Checksum Is Not Valid\n");
1321                 return (EIO);
1322         }
1323         if (!ixgb_init_hw(&adapter->hw)) {
1324                 device_printf(adapter->dev, "Hardware Initialization Failed\n");
1325                 return (EIO);
1326         }
1327
1328         return (0);
1329 }
1330
1331 /*********************************************************************
1332  *
1333  *  Setup networking device structure and register an interface.
1334  *
1335  **********************************************************************/
1336 static int
1337 ixgb_setup_interface(device_t dev, struct adapter * adapter)
1338 {
1339         struct ifnet   *ifp;
1340         INIT_DEBUGOUT("ixgb_setup_interface: begin");
1341
1342         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1343         if (ifp == NULL) {
1344                 device_printf(dev, "can not allocate ifnet structure\n");
1345                 return (-1);
1346         }
1347 #if __FreeBSD_version >= 502000
1348         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1349 #else
1350         ifp->if_unit = device_get_unit(dev);
1351         ifp->if_name = "ixgb";
1352 #endif
1353         ifp->if_baudrate = 1000000000;
1354         ifp->if_init = ixgb_init;
1355         ifp->if_softc = adapter;
1356         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1357         ifp->if_ioctl = ixgb_ioctl;
1358         ifp->if_start = ixgb_start;
1359         ifp->if_get_counter = ixgb_get_counter;
1360         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1361
1362 #if __FreeBSD_version < 500000
1363         ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1364 #else
1365         ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1366 #endif
1367
1368         ifp->if_capabilities = IFCAP_HWCSUM;
1369
1370         /*
1371          * Tell the upper layer(s) we support long frames.
1372          */
1373         ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1374
1375 #if __FreeBSD_version >= 500000
1376         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1377 #endif
1378
1379         ifp->if_capenable = ifp->if_capabilities;
1380
1381 #ifdef DEVICE_POLLING
1382         ifp->if_capabilities |= IFCAP_POLLING;
1383 #endif
1384
1385         /*
1386          * Specify the media types supported by this adapter and register
1387          * callbacks to update media and link information
1388          */
1389         ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1390                      ixgb_media_status);
1391         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1392                     0, NULL);
1393         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1394                     0, NULL);
1395         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1396         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1397
1398         return (0);
1399 }
1400
1401 /********************************************************************
1402  * Manage DMA'able memory.
1403  *******************************************************************/
1404 static void
1405 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1406 {
1407         if (error)
1408                 return;
1409         *(bus_addr_t *) arg = segs->ds_addr;
1410         return;
1411 }
1412
1413 static int
1414 ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1415                 struct ixgb_dma_alloc * dma, int mapflags)
1416 {
1417         device_t dev;
1418         int             r;
1419
1420         dev = adapter->dev;
1421         r = bus_dma_tag_create(bus_get_dma_tag(dev),    /* parent */
1422                                PAGE_SIZE, 0,    /* alignment, bounds */
1423                                BUS_SPACE_MAXADDR,       /* lowaddr */
1424                                BUS_SPACE_MAXADDR,       /* highaddr */
1425                                NULL, NULL,      /* filter, filterarg */
1426                                size,    /* maxsize */
1427                                1,       /* nsegments */
1428                                size,    /* maxsegsize */
1429                                BUS_DMA_ALLOCNOW,        /* flags */
1430 #if __FreeBSD_version >= 502000
1431                                NULL,    /* lockfunc */
1432                                NULL,    /* lockfuncarg */
1433 #endif
1434                                &dma->dma_tag);
1435         if (r != 0) {
1436                 device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1437                        "error %u\n", r);
1438                 goto fail_0;
1439         }
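        /* Allocate DMA-safe memory and a map to go with it */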
1440         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1441                              BUS_DMA_NOWAIT, &dma->dma_map);
1442         if (r != 0) {
1443                 device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1444                        "error %u\n", r);
1445                 goto fail_1;
1446         }
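        /* Load the buffer; ixgb_dmamap_cb records the bus address in dma_paddr */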
1447         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1448                             size,
1449                             ixgb_dmamap_cb,
1450                             &dma->dma_paddr,
1451                             mapflags | BUS_DMA_NOWAIT);
1452         if (r != 0) {
1453                 device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1454                        "error %u\n", r);
1455                 goto fail_2;
1456         }
1457         dma->dma_size = size;
1458         return (0);
1459 fail_2:
1460         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1461 fail_1:
1462         bus_dma_tag_destroy(dma->dma_tag);
1463 fail_0:
1464         dma->dma_tag = NULL;
1465         return (r);
1466 }
1467
1468
1469
1470 static void
1471 ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1472 {
1473         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1474         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1475         bus_dma_tag_destroy(dma->dma_tag);
1476 }
1477
1478 /*********************************************************************
1479  *
1480  *  Allocate memory for tx_buffer structures. Each tx_buffer stores all
1481  *  the information needed to transmit a packet on the wire.
1482  *
1483  **********************************************************************/
1484 static int
1485 ixgb_allocate_transmit_structures(struct adapter * adapter)
1486 {
1487         if (!(adapter->tx_buffer_area =
1488               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1489                                             adapter->num_tx_desc, M_DEVBUF,
1490                                             M_NOWAIT | M_ZERO))) {
1491                 device_printf(adapter->dev,
1492                     "Unable to allocate tx_buffer memory\n");
1493                 return ENOMEM;
1494         }
1495         bzero(adapter->tx_buffer_area,
1496               sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1497
1498         return 0;
1499 }
1500
1501 /*********************************************************************
1502  *
1503  *  Allocate and initialize transmit structures.
1504  *
1505  **********************************************************************/
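/*
 * The transmit tag below is sized for packet mappings rather than a
 * single contiguous buffer: one map may span up to IXGB_MAX_SCATTER
 * segments of at most MCLBYTES each, hence a maxsize of
 * MCLBYTES * IXGB_MAX_SCATTER.
 */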
1506 static int
1507 ixgb_setup_transmit_structures(struct adapter * adapter)
1508 {
1509         /*
1510          * Setup DMA descriptor areas.
1511          */
1512         if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
1513                                PAGE_SIZE, 0,    /* alignment, bounds */
1514                                BUS_SPACE_MAXADDR,       /* lowaddr */
1515                                BUS_SPACE_MAXADDR,       /* highaddr */
1516                                NULL, NULL,      /* filter, filterarg */
1517                                MCLBYTES * IXGB_MAX_SCATTER,     /* maxsize */
1518                                IXGB_MAX_SCATTER,        /* nsegments */
1519                                MCLBYTES,        /* maxsegsize */
1520                                BUS_DMA_ALLOCNOW,        /* flags */
1521 #if __FreeBSD_version >= 502000
1522                                NULL,    /* lockfunc */
1523                                NULL,    /* lockfuncarg */
1524 #endif
1525                                &adapter->txtag)) {
1526                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1527                 return (ENOMEM);
1528         }
1529         if (ixgb_allocate_transmit_structures(adapter))
1530                 return ENOMEM;
1531
1532         bzero((void *)adapter->tx_desc_base,
1533               (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
1534
1535         adapter->next_avail_tx_desc = 0;
1536         adapter->oldest_used_tx_desc = 0;
1537
1538         /* Set number of descriptors available */
1539         adapter->num_tx_desc_avail = adapter->num_tx_desc;
1540
1541         /* Set checksum context */
1542         adapter->active_checksum_context = OFFLOAD_NONE;
1543
1544         return 0;
1545 }
1546
1547 /*********************************************************************
1548  *
1549  *  Enable transmit unit.
1550  *
1551  **********************************************************************/
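/*
 * The 64-bit physical address of the descriptor ring is split across
 * TDBAL/TDBAH (low and high 32 bits), TDLEN holds the ring size in
 * bytes, and TDH/TDT both start at zero so the ring begins empty.
 */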
1552 static void
1553 ixgb_initialize_transmit_unit(struct adapter * adapter)
1554 {
1555         u_int32_t       reg_tctl;
1556         u_int64_t       tdba = adapter->txdma.dma_paddr;
1557
1558         /* Setup the Base and Length of the Tx Descriptor Ring */
1559         IXGB_WRITE_REG(&adapter->hw, TDBAL,
1560                        (tdba & 0x00000000ffffffffULL));
1561         IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1562         IXGB_WRITE_REG(&adapter->hw, TDLEN,
1563                        adapter->num_tx_desc *
1564                        sizeof(struct ixgb_tx_desc));
1565
1566         /* Setup the HW Tx Head and Tail descriptor pointers */
1567         IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1568         IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1569
1570
1571         HW_DEBUGOUT2("Base = %x, Length = %x\n",
1572                      IXGB_READ_REG(&adapter->hw, TDBAL),
1573                      IXGB_READ_REG(&adapter->hw, TDLEN));
1574
1575         IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1576
1577
1578         /* Program the Transmit Control Register */
1579         reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1580         reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1581         IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1582
1583         /* Setup Transmit Descriptor Settings for this adapter */
1584         adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1585
1586         if (adapter->tx_int_delay > 0)
1587                 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1588         return;
1589 }
1590
1591 /*********************************************************************
1592  *
1593  *  Free all transmit related data structures.
1594  *
1595  **********************************************************************/
1596 static void
1597 ixgb_free_transmit_structures(struct adapter * adapter)
1598 {
1599         struct ixgb_buffer *tx_buffer;
1600         int             i;
1601
1602         INIT_DEBUGOUT("free_transmit_structures: begin");
1603
1604         if (adapter->tx_buffer_area != NULL) {
1605                 tx_buffer = adapter->tx_buffer_area;
1606                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1607                         if (tx_buffer->m_head != NULL) {
1608                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1609                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1610                                 m_freem(tx_buffer->m_head);
1611                         }
1612                         tx_buffer->m_head = NULL;
1613                 }
1614         }
1615         if (adapter->tx_buffer_area != NULL) {
1616                 free(adapter->tx_buffer_area, M_DEVBUF);
1617                 adapter->tx_buffer_area = NULL;
1618         }
1619         if (adapter->txtag != NULL) {
1620                 bus_dma_tag_destroy(adapter->txtag);
1621                 adapter->txtag = NULL;
1622         }
1623         return;
1624 }
1625
1626 /*********************************************************************
1627  *
1628  *  The checksum offload context needs to be set up when we transmit the
1629  *  first packet of a particular protocol (TCP or UDP). The context is
1630  *  changed only when the protocol type changes.
1631  *
1632  **********************************************************************/
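/*
 * Field layout of the context descriptor, per the usual Intel offload
 * convention: tucss is the offset at which checksumming starts (the
 * start of the TCP/UDP header, i.e. Ethernet plus IP header), tucso is
 * the offset of the checksum field itself, and a tucse of zero means
 * "checksum through the end of the packet".
 */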
1633 static void
1634 ixgb_transmit_checksum_setup(struct adapter * adapter,
1635                              struct mbuf * mp,
1636                              u_int8_t * txd_popts)
1637 {
1638         struct ixgb_context_desc *TXD;
1639         struct ixgb_buffer *tx_buffer;
1640         int             curr_txd;
1641
1642         if (mp->m_pkthdr.csum_flags) {
1643
1644                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1645                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1646                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1647                                 return;
1648                         else
1649                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1650                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1651                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1652                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1653                                 return;
1654                         else
1655                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1656                 } else {
1657                         *txd_popts = 0;
1658                         return;
1659                 }
1660         } else {
1661                 *txd_popts = 0;
1662                 return;
1663         }
1664
1665         /*
1666          * If we reach this point, the checksum offload context needs to be
1667          * reset.
1668          */
1669         curr_txd = adapter->next_avail_tx_desc;
1670         tx_buffer = &adapter->tx_buffer_area[curr_txd];
1671         TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
1672
1673
1674         TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1675         TXD->tucse = 0;
1676
1677         TXD->mss = 0;
1678
1679         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1680                 TXD->tucso =
1681                         ENET_HEADER_SIZE + sizeof(struct ip) +
1682                         offsetof(struct tcphdr, th_sum);
1683         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1684                 TXD->tucso =
1685                         ENET_HEADER_SIZE + sizeof(struct ip) +
1686                         offsetof(struct udphdr, uh_sum);
1687         }
1688         TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
1689
1690         tx_buffer->m_head = NULL;
1691
1692         if (++curr_txd == adapter->num_tx_desc)
1693                 curr_txd = 0;
1694
1695         adapter->num_tx_desc_avail--;
1696         adapter->next_avail_tx_desc = curr_txd;
1697         return;
1698 }
1699
1700 /**********************************************************************
1701  *
1702  *  Examine each tx_buffer in the used queue. If the hardware is done
1703  *  processing the packet, free the associated resources and return
1704  *  the tx_buffer to the free queue.
1705  *
1706  **********************************************************************/
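/*
 * The cleanup walk starts at oldest_used_tx_desc and keeps going while
 * the descriptor's DD (Descriptor Done) status bit is set, i.e. while
 * the hardware reports that it has finished with the slot.  Each
 * reclaimed slot has its mbuf and DMA map released and is counted back
 * into num_tx_desc_avail.
 */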
1707 static void
1708 ixgb_clean_transmit_interrupts(struct adapter * adapter)
1709 {
1710         int             i, num_avail;
1711         struct ixgb_buffer *tx_buffer;
1712         struct ixgb_tx_desc *tx_desc;
1713
1714         IXGB_LOCK_ASSERT(adapter);
1715
1716         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1717                 return;
1718
1719 #ifdef _SV_
1720         adapter->clean_tx_interrupts++;
1721 #endif
1722         num_avail = adapter->num_tx_desc_avail;
1723         i = adapter->oldest_used_tx_desc;
1724
1725         tx_buffer = &adapter->tx_buffer_area[i];
1726         tx_desc = &adapter->tx_desc_base[i];
1727
1728         while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1729
1730                 tx_desc->status = 0;
1731                 num_avail++;
1732
1733                 if (tx_buffer->m_head) {
1734                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1735                                         BUS_DMASYNC_POSTWRITE);
1736                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1737                         bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1738                         m_freem(tx_buffer->m_head);
1739                         tx_buffer->m_head = NULL;
1740                 }
1741                 if (++i == adapter->num_tx_desc)
1742                         i = 0;
1743
1744                 tx_buffer = &adapter->tx_buffer_area[i];
1745                 tx_desc = &adapter->tx_desc_base[i];
1746         }
1747
1748         adapter->oldest_used_tx_desc = i;
1749
1750         /*
1751          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1752          * it is OK to send packets. If there are no pending descriptors,
1753          * clear the timeout. Otherwise, if some descriptors have been freed,
1754          * restart the timeout.
1755          */
1756         if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1757                 struct ifnet   *ifp = adapter->ifp;
1758
1759                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1760                 if (num_avail == adapter->num_tx_desc)
1761                         adapter->tx_timer = 0;
1762                 else if (num_avail == adapter->num_tx_desc_avail)
1763                         adapter->tx_timer = IXGB_TX_TIMEOUT;
1764         }
1765         adapter->num_tx_desc_avail = num_avail;
1766         return;
1767 }
1768
1769
1770 /*********************************************************************
1771  *
1772  *  Get a buffer from the system mbuf cluster pool.
1773  *
1774  **********************************************************************/
1775 static int
1776 ixgb_get_buf(int i, struct adapter * adapter,
1777              struct mbuf * nmp)
1778 {
1779         register struct mbuf *mp = nmp;
1780         struct ixgb_buffer *rx_buffer;
1781         struct ifnet   *ifp;
1782         bus_addr_t      paddr;
1783         int             error;
1784
1785         ifp = adapter->ifp;
1786
1787         if (mp == NULL) {
1788
1789                 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1790
1791                 if (mp == NULL) {
1792                         adapter->mbuf_alloc_failed++;
1793                         return (ENOBUFS);
1794                 }
1795                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1796         } else {
1797                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1798                 mp->m_data = mp->m_ext.ext_buf;
1799                 mp->m_next = NULL;
1800         }
1801
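        /*
         * For standard-sized frames the data is shifted by ETHER_ALIGN
         * (2 bytes) so that the IP header following the 14-byte Ethernet
         * header lands on a 32-bit boundary.
         */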
1802         if (ifp->if_mtu <= ETHERMTU) {
1803                 m_adj(mp, ETHER_ALIGN);
1804         }
1805         rx_buffer = &adapter->rx_buffer_area[i];
1806
1807         /*
1808          * Using memory from the mbuf cluster pool, invoke the bus_dma
1809          * machinery to arrange the memory mapping.
1810          */
1811         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1812                                 mtod(mp, void *), mp->m_len,
1813                                 ixgb_dmamap_cb, &paddr, 0);
1814         if (error) {
1815                 m_free(mp);
1816                 return (error);
1817         }
1818         rx_buffer->m_head = mp;
1819         adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1820         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1821
1822         return (0);
1823 }
1824
1825 /*********************************************************************
1826  *
1827  *  Allocate memory for rx_buffer structures. Since we use one
1828  *  rx_buffer per received packet, the maximum number of rx_buffers
1829  *  that we'll need is equal to the number of receive descriptors
1830  *  that we've allocated.
1831  *
1832  **********************************************************************/
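/*
 * One ixgb_buffer and one DMA map are created per receive descriptor,
 * and every descriptor is pre-loaded with an mbuf cluster through
 * ixgb_get_buf() before the receiver is enabled.
 */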
1833 static int
1834 ixgb_allocate_receive_structures(struct adapter * adapter)
1835 {
1836         int             i, error;
1837         struct ixgb_buffer *rx_buffer;
1838
1839         if (!(adapter->rx_buffer_area =
1840               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1841                                             adapter->num_rx_desc, M_DEVBUF,
1842                                             M_NOWAIT | M_ZERO))) {
1843                 device_printf(adapter->dev,
1844                     "Unable to allocate rx_buffer memory\n");
1845                 return (ENOMEM);
1846         }
1847         bzero(adapter->rx_buffer_area,
1848               sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1849
1850         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1851                                    PAGE_SIZE, 0,        /* alignment, bounds */
1852                                    BUS_SPACE_MAXADDR,   /* lowaddr */
1853                                    BUS_SPACE_MAXADDR,   /* highaddr */
1854                                    NULL, NULL,  /* filter, filterarg */
1855                                    MCLBYTES,    /* maxsize */
1856                                    1,   /* nsegments */
1857                                    MCLBYTES,    /* maxsegsize */
1858                                    BUS_DMA_ALLOCNOW,    /* flags */
1859 #if __FreeBSD_version >= 502000
1860                                    NULL,        /* lockfunc */
1861                                    NULL,        /* lockfuncarg */
1862 #endif
1863                                    &adapter->rxtag);
1864         if (error != 0) {
1865                 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1866                        "bus_dma_tag_create failed; error %u\n",
1867                        error);
1868                 goto fail_0;
1869         }
1870         rx_buffer = adapter->rx_buffer_area;
1871         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1872                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1873                                           &rx_buffer->map);
1874                 if (error != 0) {
1875                         device_printf(adapter->dev,
1876                                "ixgb_allocate_receive_structures: "
1877                                "bus_dmamap_create failed; error %u\n",
1878                                error);
1879                         goto fail_1;
1880                 }
1881         }
1882
1883         for (i = 0; i < adapter->num_rx_desc; i++) {
1884                 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1885                         adapter->rx_buffer_area[i].m_head = NULL;
1886                         adapter->rx_desc_base[i].buff_addr = 0;
1887                         return (ENOBUFS);
1888                 }
1889         }
1890
1891         return (0);
1892 fail_1:
1893         bus_dma_tag_destroy(adapter->rxtag);
1894 fail_0:
1895         adapter->rxtag = NULL;
1896         free(adapter->rx_buffer_area, M_DEVBUF);
1897         adapter->rx_buffer_area = NULL;
1898         return (error);
1899 }
1900
1901 /*********************************************************************
1902  *
1903  *  Allocate and initialize receive structures.
1904  *
1905  **********************************************************************/
1906 static int
1907 ixgb_setup_receive_structures(struct adapter * adapter)
1908 {
1909         bzero((void *)adapter->rx_desc_base,
1910               (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1911
1912         if (ixgb_allocate_receive_structures(adapter))
1913                 return ENOMEM;
1914
1915         /* Setup our descriptor pointers */
1916         adapter->next_rx_desc_to_check = 0;
1917         adapter->next_rx_desc_to_use = 0;
1918         return (0);
1919 }
1920
1921 /*********************************************************************
1922  *
1923  *  Enable receive unit.
1924  *
1925  **********************************************************************/
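/*
 * Receives are disabled while the ring is programmed.  RDBAL/RDBAH take
 * the low and high halves of the ring's physical address, RDLEN its size
 * in bytes, and RDT is set to num_rx_desc - 1 so that all but one
 * descriptor are initially available to the hardware.
 */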
1926 static void
1927 ixgb_initialize_receive_unit(struct adapter * adapter)
1928 {
1929         u_int32_t       reg_rctl;
1930         u_int32_t       reg_rxcsum;
1931         u_int32_t       reg_rxdctl;
1932         struct ifnet   *ifp;
1933         u_int64_t       rdba = adapter->rxdma.dma_paddr;
1934
1935         ifp = adapter->ifp;
1936
1937         /*
1938          * Make sure receives are disabled while setting up the descriptor
1939          * ring
1940          */
1941         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1942         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1943
1944         /* Set the Receive Delay Timer Register */
1945         IXGB_WRITE_REG(&adapter->hw, RDTR,
1946                        adapter->rx_int_delay);
1947
1948
1949         /* Setup the Base and Length of the Rx Descriptor Ring */
1950         IXGB_WRITE_REG(&adapter->hw, RDBAL,
1951                        (rdba & 0x00000000ffffffffULL));
1952         IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1953         IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1954                        sizeof(struct ixgb_rx_desc));
1955
1956         /* Setup the HW Rx Head and Tail Descriptor Pointers */
1957         IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1958
1959         IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
1960
1961
1962
1963         reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1964                 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1965                 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1966         IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
1967
1968
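        /*
         * RAIDC appears to control adaptive receive interrupt moderation.
         * The poll threshold works out to roughly 1/16th of the ring
         * ((num_rx_desc - 1) divided by 8 and then by 2), masked to the
         * 6 bits the field provides; the delay comes from rx_int_delay.
         */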
1969         adapter->raidc = 1;
1970         if (adapter->raidc) {
1971                 uint32_t        raidc;
1972                 uint8_t         poll_threshold;
1973 #define IXGB_RAIDC_POLL_DEFAULT 120
1974
1975                 poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1976                 poll_threshold >>= 1;
1977                 poll_threshold &= 0x3F;
1978                 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1979                         (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1980                         (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1981                         poll_threshold;
1982                 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1983         }
1984         /* Enable Receive Checksum Offload for TCP and UDP? */
1985         if (ifp->if_capenable & IFCAP_RXCSUM) {
1986                 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1987                 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1988                 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1989         }
1990         /* Setup the Receive Control Register */
1991         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1992         reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1993         reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1994                 IXGB_RCTL_CFF |
1995                 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
1996
1997         switch (adapter->rx_buffer_len) {
1998         default:
1999         case IXGB_RXBUFFER_2048:
2000                 reg_rctl |= IXGB_RCTL_BSIZE_2048;
2001                 break;
2002         case IXGB_RXBUFFER_4096:
2003                 reg_rctl |= IXGB_RCTL_BSIZE_4096;
2004                 break;
2005         case IXGB_RXBUFFER_8192:
2006                 reg_rctl |= IXGB_RCTL_BSIZE_8192;
2007                 break;
2008         case IXGB_RXBUFFER_16384:
2009                 reg_rctl |= IXGB_RCTL_BSIZE_16384;
2010                 break;
2011         }
2012
2013         reg_rctl |= IXGB_RCTL_RXEN;
2014
2015
2016         /* Enable Receives */
2017         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2018
2019         return;
2020 }
2021
2022 /*********************************************************************
2023  *
2024  *  Free receive related data structures.
2025  *
2026  **********************************************************************/
2027 static void
2028 ixgb_free_receive_structures(struct adapter * adapter)
2029 {
2030         struct ixgb_buffer *rx_buffer;
2031         int             i;
2032
2033         INIT_DEBUGOUT("free_receive_structures: begin");
2034
2035         if (adapter->rx_buffer_area != NULL) {
2036                 rx_buffer = adapter->rx_buffer_area;
2037                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2038                         if (rx_buffer->map != NULL) {
2039                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2040                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2041                         }
2042                         if (rx_buffer->m_head != NULL)
2043                                 m_freem(rx_buffer->m_head);
2044                         rx_buffer->m_head = NULL;
2045                 }
2046         }
2047         if (adapter->rx_buffer_area != NULL) {
2048                 free(adapter->rx_buffer_area, M_DEVBUF);
2049                 adapter->rx_buffer_area = NULL;
2050         }
2051         if (adapter->rxtag != NULL) {
2052                 bus_dma_tag_destroy(adapter->rxtag);
2053                 adapter->rxtag = NULL;
2054         }
2055         return;
2056 }
2057
2058 /*********************************************************************
2059  *
2060  *  This routine executes in interrupt context. It replenishes
2061  *  the mbufs in the descriptor ring and passes data that has been
2062  *  DMA'd into host memory up to the upper layer.
2063  *
2064  *  We loop at most count times if count is > 0, or until done if
2065  *  count < 0.
2066  *
2067  *********************************************************************/
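/*
 * Frames larger than one cluster arrive as a chain of descriptors; only
 * the descriptor carrying the EOP (End Of Packet) bit ends a frame.  The
 * partial chain is accumulated in adapter->fmp/lmp until EOP is seen, at
 * which point the completed mbuf chain is handed to ifp->if_input() with
 * the adapter lock dropped around the call.
 */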
2068 static int
2069 ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2070 {
2071         struct ifnet   *ifp;
2072         struct mbuf    *mp;
2073 #if __FreeBSD_version < 500000
2074         struct ether_header *eh;
2075 #endif
2076         int             eop = 0;
2077         int             len;
2078         u_int8_t        accept_frame = 0;
2079         int             i;
2080         int             next_to_use = 0;
2081         int             eop_desc;
2082         int             rx_npkts = 0;
2083         /* Pointer to the receive descriptor being examined. */
2084         struct ixgb_rx_desc *current_desc;
2085
2086         IXGB_LOCK_ASSERT(adapter);
2087
2088         ifp = adapter->ifp;
2089         i = adapter->next_rx_desc_to_check;
2090         next_to_use = adapter->next_rx_desc_to_use;
2091         eop_desc = adapter->next_rx_desc_to_check;
2092         current_desc = &adapter->rx_desc_base[i];
2093
2094         if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2095 #ifdef _SV_
2096                 adapter->no_pkts_avail++;
2097 #endif
2098                 return (rx_npkts);
2099         }
2100         while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2101
2102                 mp = adapter->rx_buffer_area[i].m_head;
2103                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2104                                 BUS_DMASYNC_POSTREAD);
2105                 accept_frame = 1;
2106                 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2107                         count--;
2108                         eop = 1;
2109                 } else {
2110                         eop = 0;
2111                 }
2112                 len = current_desc->length;
2113
2114                 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2115                             IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2116                                             IXGB_RX_DESC_ERRORS_RXE)) {
2117                         accept_frame = 0;
2118                 }
2119                 if (accept_frame) {
2120
2121                         /* Assign correct length to the current fragment */
2122                         mp->m_len = len;
2123
2124                         if (adapter->fmp == NULL) {
2125                                 mp->m_pkthdr.len = len;
2126                                 adapter->fmp = mp;      /* Store the first mbuf */
2127                                 adapter->lmp = mp;
2128                         } else {
2129                                 /* Chain mbufs together */
2130                                 mp->m_flags &= ~M_PKTHDR;
2131                                 adapter->lmp->m_next = mp;
2132                                 adapter->lmp = adapter->lmp->m_next;
2133                                 adapter->fmp->m_pkthdr.len += len;
2134                         }
2135
2136                         if (eop) {
2137                                 eop_desc = i;
2138                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2139
2140 #if __FreeBSD_version < 500000
2141                                 eh = mtod(adapter->fmp, struct ether_header *);
2142
2143                                 /* Remove ethernet header from mbuf */
2144                                 m_adj(adapter->fmp, sizeof(struct ether_header));
2145                                 ixgb_receive_checksum(adapter, current_desc,
2146                                                       adapter->fmp);
2147
2148                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2149                                         VLAN_INPUT_TAG(eh, adapter->fmp,
2150                                                      current_desc->special);
2151                                 else
2152                                         ether_input(ifp, eh, adapter->fmp);
2153 #else
2154                                 ixgb_receive_checksum(adapter, current_desc,
2155                                                       adapter->fmp);
2156 #if __FreeBSD_version < 700000
2157                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2158                                         VLAN_INPUT_TAG(ifp, adapter->fmp,
2159                                                        current_desc->special);
2160 #else
2161                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2162                                         adapter->fmp->m_pkthdr.ether_vtag =
2163                                             current_desc->special;
2164                                         adapter->fmp->m_flags |= M_VLANTAG;
2165                                 }
2166 #endif
2167
2168                                 if (adapter->fmp != NULL) {
2169                                         IXGB_UNLOCK(adapter);
2170                                         (*ifp->if_input) (ifp, adapter->fmp);
2171                                         IXGB_LOCK(adapter);
2172                                         rx_npkts++;
2173                                 }
2174 #endif
2175                                 adapter->fmp = NULL;
2176                                 adapter->lmp = NULL;
2177                         }
2178                         adapter->rx_buffer_area[i].m_head = NULL;
2179                 } else {
2180                         adapter->dropped_pkts++;
2181                         if (adapter->fmp != NULL)
2182                                 m_freem(adapter->fmp);
2183                         adapter->fmp = NULL;
2184                         adapter->lmp = NULL;
2185                 }
2186
2187                 /* Zero out the receive descriptor's status */
2188                 current_desc->status = 0;
2189
2190                 /* Advance our pointers to the next descriptor */
2191                 if (++i == adapter->num_rx_desc) {
2192                         i = 0;
2193                         current_desc = adapter->rx_desc_base;
2194                 } else
2195                         current_desc++;
2196         }
2197         adapter->next_rx_desc_to_check = i;
2198
2199         if (--i < 0)
2200                 i = (adapter->num_rx_desc - 1);
2201
2202         /*
2203          * 82597EX: Workaround for redundant write back in the receive descriptor
2204          * ring (causes memory corruption). Avoid reusing and re-submitting the most
2205          * recently received RX descriptor back to the hardware.
2206          *
2207          * if (last written back descriptor == EOP bit set descriptor)
2208          *      then avoid re-submitting the most recently received RX descriptor
2209          *      back to the hardware.
2210          * if (last written back descriptor != EOP bit set descriptor)
2211          *      then avoid re-submitting the most recently received RX descriptors
2212          *      up to the last EOP bit set descriptor.
2213          */
2214         if (eop_desc != i) {
2215                 if (++eop_desc == adapter->num_rx_desc)
2216                         eop_desc = 0;
2217                 i = eop_desc;
2218         }
2219         /* Replenish the descriptors with new mbufs up to the last EOP bit set descriptor */
2220         while (next_to_use != i) {
2221                 current_desc = &adapter->rx_desc_base[next_to_use];
2222                 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2223                             IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2224                                              IXGB_RX_DESC_ERRORS_RXE))) {
2225                         mp = adapter->rx_buffer_area[next_to_use].m_head;
2226                         ixgb_get_buf(next_to_use, adapter, mp);
2227                 } else {
2228                         if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2229                                 break;
2230                 }
2231                 /* Advance our pointers to the next descriptor */
2232                 if (++next_to_use == adapter->num_rx_desc) {
2233                         next_to_use = 0;
2234                         current_desc = adapter->rx_desc_base;
2235                 } else
2236                         current_desc++;
2237         }
2238         adapter->next_rx_desc_to_use = next_to_use;
2239         if (--next_to_use < 0)
2240                 next_to_use = (adapter->num_rx_desc - 1);
2241         /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
2242         IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2243
2244         return (rx_npkts);
2245 }
2246
2247 /*********************************************************************
2248  *
2249  *  Verify that the hardware indicated that the checksum is valid.
2250  *  Inform the stack of the checksum status so that the stack does
2251  *  not spend time verifying the checksum itself.
2252  *
2253  *********************************************************************/
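/*
 * IXSM set means the checksum indication should be ignored (the hardware
 * did not validate this frame), so all checksum flags are cleared.  The
 * IPCS and TCPCS status bits report that the IP and TCP/UDP checksums
 * were evaluated, with the IPE/TCPE error bits indicating failure.
 * csum_data is set to 0xffff so the stack treats the pseudo-header
 * checksum as already verified.
 */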
2254 static void
2255 ixgb_receive_checksum(struct adapter * adapter,
2256                       struct ixgb_rx_desc * rx_desc,
2257                       struct mbuf * mp)
2258 {
2259         if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2260                 mp->m_pkthdr.csum_flags = 0;
2261                 return;
2262         }
2263         if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2264                 /* Did it pass? */
2265                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2266                         /* IP Checksum Good */
2267                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2268                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2269
2270                 } else {
2271                         mp->m_pkthdr.csum_flags = 0;
2272                 }
2273         }
2274         if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2275                 /* Did it pass? */
2276                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2277                         mp->m_pkthdr.csum_flags |=
2278                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2279                         mp->m_pkthdr.csum_data = htons(0xffff);
2280                 }
2281         }
2282         return;
2283 }
2284
2285
2286 static void
2287 ixgb_enable_vlans(struct adapter * adapter)
2288 {
2289         uint32_t        ctrl;
2290
2291         ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2292         ctrl |= IXGB_CTRL0_VME;
2293         IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2294
2295         return;
2296 }
2297
2298
2299 static void
2300 ixgb_enable_intr(struct adapter * adapter)
2301 {
2302         IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2303                             IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2304         return;
2305 }
2306
2307 static void
2308 ixgb_disable_intr(struct adapter * adapter)
2309 {
2310         IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2311         return;
2312 }
2313
2314 void
2315 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2316                    uint32_t reg,
2317                    uint16_t * value)
2318 {
2319         pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2320                          *value, 2);
2321 }
2322
2323 /**********************************************************************
2324  *
2325  *  Update the board statistics counters.
2326  *
2327  **********************************************************************/
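/*
 * The += accumulation below assumes the hardware statistics registers
 * clear on read, so each periodic read folds the latest deltas into the
 * driver's running totals.
 */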
2328 static void
2329 ixgb_update_stats_counters(struct adapter * adapter)
2330 {
2331
2332         adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2333         adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2334         adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2335         adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2336         adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2337         adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2338         adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2339         adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2340         adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2341         adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2342
2343         adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2344         adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2345         adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2346         adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2347         adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2348         adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2349         adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2350         adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2351         adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2352         adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2353         adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2354         adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2355         adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2356         adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2357         adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2358         adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2359         adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2360         adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2361         adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2362         adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2363         adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2364         adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2365         adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2366         adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2367         adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2368         adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2369         adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2370
2371         adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2372         adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2373         adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2374         adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2375         adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2376         adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2377         adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2378         adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2379         adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2380         adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2381         adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2382         adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2383         adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2384         adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2385         adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2386         adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2387         adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2388         adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2389         adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2390         adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2391         adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2392         adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2393 }
2394
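/*
 * if_get_counter() method: interface statistics are served directly from
 * the accumulated hardware counters; anything not handled here falls back
 * to the stack's default accounting.
 */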
2395 static uint64_t
2396 ixgb_get_counter(struct ifnet *ifp, ift_counter cnt)
2397 {
2398         struct adapter *adapter;
2399
2400         adapter = if_getsoftc(ifp);
2401
2402         switch (cnt) {
2403         case IFCOUNTER_IPACKETS:
2404                 return (adapter->stats.gprcl);
2405         case IFCOUNTER_OPACKETS:
2406                 return (adapter->stats.gptcl);
2407         case IFCOUNTER_IBYTES:
2408                 return (adapter->stats.gorcl);
2409         case IFCOUNTER_OBYTES:
2410                 return (adapter->stats.gotcl);
2411         case IFCOUNTER_IMCASTS:
2412                 return (adapter->stats.mprcl);
2413         case IFCOUNTER_COLLISIONS:
2414                 return (0);
2415         case IFCOUNTER_IERRORS:
2416                 return (adapter->dropped_pkts + adapter->stats.crcerrs +
2417                     adapter->stats.rnbc + adapter->stats.mpc +
2418                     adapter->stats.rlec);
2419         default:
2420                 return (if_get_counter_default(ifp, cnt));
2421         }
2422 }
2423
2424 /**********************************************************************
2425  *
2426  *  This routine is called only when ixgb_display_debug_stats is enabled.
2427  *  It provides a way to examine important statistics maintained by
2428  *  the driver and the hardware.
2429  *
2430  **********************************************************************/
2431 static void
2432 ixgb_print_hw_stats(struct adapter * adapter)
2433 {
2434         char            buf_speed[100], buf_type[100];
2435         ixgb_bus_speed  bus_speed;
2436         ixgb_bus_type   bus_type;
2437         device_t dev;
2438
2439         dev = adapter->dev;
2440 #ifdef _SV_
2441         device_printf(dev, "Packets not Avail = %ld\n",
2442                adapter->no_pkts_avail);
2443         device_printf(dev, "CleanTxInterrupts = %ld\n",
2444                adapter->clean_tx_interrupts);
2445         device_printf(dev, "ICR RXDMT0 = %lld\n",
2446                (long long)adapter->sv_stats.icr_rxdmt0);
2447         device_printf(dev, "ICR RXO = %lld\n",
2448                (long long)adapter->sv_stats.icr_rxo);
2449         device_printf(dev, "ICR RXT0 = %lld\n",
2450                (long long)adapter->sv_stats.icr_rxt0);
2451         device_printf(dev, "ICR TXDW = %lld\n",
2452                (long long)adapter->sv_stats.icr_TXDW);
2453 #endif                          /* _SV_ */
2454
2455         bus_speed = adapter->hw.bus.speed;
2456         bus_type = adapter->hw.bus.type;
2457         sprintf(buf_speed,
2458                 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2459                 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2460                 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2461                 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2462                 "UNKNOWN");
2463         device_printf(dev, "PCI_Bus_Speed = %s\n",
2464                buf_speed);
2465
2466         sprintf(buf_type,
2467                 bus_type == ixgb_bus_type_pci ? "PCI" :
2468                 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2469                 "UNKNOWN");
2470         device_printf(dev, "PCI_Bus_Type = %s\n",
2471                buf_type);
2472
2473         device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2474                adapter->no_tx_desc_avail1);
2475         device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2476                adapter->no_tx_desc_avail2);
2477         device_printf(dev, "Std Mbuf Failed = %ld\n",
2478                adapter->mbuf_alloc_failed);
2479         device_printf(dev, "Std Cluster Failed = %ld\n",
2480                adapter->mbuf_cluster_failed);
2481
2482         device_printf(dev, "Defer count = %lld\n",
2483                (long long)adapter->stats.dc);
2484         device_printf(dev, "Missed Packets = %lld\n",
2485                (long long)adapter->stats.mpc);
2486         device_printf(dev, "Receive No Buffers = %lld\n",
2487                (long long)adapter->stats.rnbc);
2488         device_printf(dev, "Receive length errors = %lld\n",
2489                (long long)adapter->stats.rlec);
2490         device_printf(dev, "Crc errors = %lld\n",
2491                (long long)adapter->stats.crcerrs);
2492         device_printf(dev, "Driver dropped packets = %ld\n",
2493                adapter->dropped_pkts);
2494
2495         device_printf(dev, "XON Rcvd = %lld\n",
2496                (long long)adapter->stats.xonrxc);
2497         device_printf(dev, "XON Xmtd = %lld\n",
2498                (long long)adapter->stats.xontxc);
2499         device_printf(dev, "XOFF Rcvd = %lld\n",
2500                (long long)adapter->stats.xoffrxc);
2501         device_printf(dev, "XOFF Xmtd = %lld\n",
2502                (long long)adapter->stats.xofftxc);
2503
2504         device_printf(dev, "Good Packets Rcvd = %lld\n",
2505                (long long)adapter->stats.gprcl);
2506         device_printf(dev, "Good Packets Xmtd = %lld\n",
2507                (long long)adapter->stats.gptcl);
2508
2509         device_printf(dev, "Jumbo frames recvd = %lld\n",
2510                (long long)adapter->stats.jprcl);
2511         device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2512                (long long)adapter->stats.jptcl);
2513
2514         return;
2515
2516 }
2517
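/*
 * Sysctl handler: writing 1 to the node dumps the hardware statistics
 * via ixgb_print_hw_stats(); reads simply return -1 and have no side
 * effects.
 */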
2518 static int
2519 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2520 {
2521         int             error;
2522         int             result;
2523         struct adapter *adapter;
2524
2525         result = -1;
2526         error = sysctl_handle_int(oidp, &result, 0, req);
2527
2528         if (error || !req->newptr)
2529                 return (error);
2530
2531         if (result == 1) {
2532                 adapter = (struct adapter *) arg1;
2533                 ixgb_print_hw_stats(adapter);
2534         }
2535         return error;
2536 }