1 /*******************************************************************************
2
3 Copyright (c) 2001-2004, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD$*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/ixgb/if_ixgb.h>
41
42 /*********************************************************************
43  *  Set this to one to display debug statistics
44  *********************************************************************/
45 int             ixgb_display_debug_stats = 0;
46
47 /*********************************************************************
48  *  Linked list of board private structures for all NICs found
49  *********************************************************************/
50
51 struct adapter *ixgb_adapter_list = NULL;
52
53
54
55 /*********************************************************************
56  *  Driver version
57  *********************************************************************/
58
59 char            ixgb_driver_version[] = "1.0.6";
60 char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
61
62 /*********************************************************************
63  *  PCI Device ID Table
64  *
65  *  Used by probe to select devices to load on
66  *  Last field stores an index into ixgb_strings
67  *  Last entry must be all 0s
68  *
69  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70  *********************************************************************/
71
72 static ixgb_vendor_info_t ixgb_vendor_info_array[] =
73 {
74         /* Intel(R) PRO/10000 Network Connection */
75         {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76         {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77         /* required last entry */
78         {0, 0, 0, 0, 0}
79 };
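/*
 * ixgb_probe() walks this table to match the PCI IDs read from the
 * device; the last field of each row indexes into ixgb_strings[] below
 * for the description reported by the probe.
 */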
80
81 /*********************************************************************
82  *  Table of branding strings for all supported NICs.
83  *********************************************************************/
84
85 static char    *ixgb_strings[] = {
86         "Intel(R) PRO/10GbE Network Driver"
87 };
88
89 /*********************************************************************
90  *  Function prototypes
91  *********************************************************************/
92 static int      ixgb_probe(device_t);
93 static int      ixgb_attach(device_t);
94 static int      ixgb_detach(device_t);
95 static int      ixgb_shutdown(device_t);
96 static void     ixgb_intr(void *);
97 static void     ixgb_start(struct ifnet *);
98 static void     ixgb_start_locked(struct ifnet *);
99 static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static void     ixgb_watchdog(struct adapter *);
101 static void     ixgb_init(void *);
102 static void     ixgb_init_locked(struct adapter *);
103 static void     ixgb_stop(void *);
104 static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
105 static int      ixgb_media_change(struct ifnet *);
106 static void     ixgb_identify_hardware(struct adapter *);
107 static int      ixgb_allocate_pci_resources(struct adapter *);
108 static void     ixgb_free_pci_resources(struct adapter *);
109 static void     ixgb_local_timer(void *);
110 static int      ixgb_hardware_init(struct adapter *);
111 static int      ixgb_setup_interface(device_t, struct adapter *);
112 static int      ixgb_setup_transmit_structures(struct adapter *);
113 static void     ixgb_initialize_transmit_unit(struct adapter *);
114 static int      ixgb_setup_receive_structures(struct adapter *);
115 static void     ixgb_initialize_receive_unit(struct adapter *);
116 static void     ixgb_enable_intr(struct adapter *);
117 static void     ixgb_disable_intr(struct adapter *);
118 static void     ixgb_free_transmit_structures(struct adapter *);
119 static void     ixgb_free_receive_structures(struct adapter *);
120 static void     ixgb_update_stats_counters(struct adapter *);
121 static void     ixgb_clean_transmit_interrupts(struct adapter *);
122 static int      ixgb_allocate_receive_structures(struct adapter *);
123 static int      ixgb_allocate_transmit_structures(struct adapter *);
124 static int      ixgb_process_receive_interrupts(struct adapter *, int);
125 static void 
126 ixgb_receive_checksum(struct adapter *,
127                       struct ixgb_rx_desc * rx_desc,
128                       struct mbuf *);
129 static void 
130 ixgb_transmit_checksum_setup(struct adapter *,
131                              struct mbuf *,
132                              u_int8_t *);
133 static void     ixgb_set_promisc(struct adapter *);
134 static void     ixgb_disable_promisc(struct adapter *);
135 static void     ixgb_set_multi(struct adapter *);
136 static void     ixgb_print_hw_stats(struct adapter *);
137 static void     ixgb_print_link_status(struct adapter *);
138 static int 
139 ixgb_get_buf(int i, struct adapter *,
140              struct mbuf *);
141 static void     ixgb_enable_vlans(struct adapter * adapter);
142 static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143 static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
144 static int 
145 ixgb_dma_malloc(struct adapter *, bus_size_t,
146                 struct ixgb_dma_alloc *, int);
147 static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148 #ifdef DEVICE_POLLING
149 static poll_handler_t ixgb_poll;
150 #endif
151
152 /*********************************************************************
153  *  FreeBSD Device Interface Entry Points
154  *********************************************************************/
155
156 static device_method_t ixgb_methods[] = {
157         /* Device interface */
158         DEVMETHOD(device_probe, ixgb_probe),
159         DEVMETHOD(device_attach, ixgb_attach),
160         DEVMETHOD(device_detach, ixgb_detach),
161         DEVMETHOD(device_shutdown, ixgb_shutdown),
162
163         DEVMETHOD_END
164 };
165
166 static driver_t ixgb_driver = {
167         "ixgb", ixgb_methods, sizeof(struct adapter),
168 };
169
170 static devclass_t ixgb_devclass;
171 DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
172
173 MODULE_DEPEND(ixgb, pci, 1, 1, 1);
174 MODULE_DEPEND(ixgb, ether, 1, 1, 1);
175
176 /* some defines for controlling descriptor fetches in h/w */
177 #define RXDCTL_PTHRESH_DEFAULT 128      /* chip considers prefetch below this */
178 #define RXDCTL_HTHRESH_DEFAULT 16       /* chip will only prefetch if tail is
179                                          * pushed this many descriptors from
180                                          * head */
181 #define RXDCTL_WTHRESH_DEFAULT 0        /* chip writes back at this many or RXT0 */
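/*
 * These thresholds are combined into the RXDCTL register when the receive
 * unit is programmed (see ixgb_initialize_receive_unit() later in this
 * file); the shift position of each field comes from the shared ixgb
 * hardware header.
 */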
182
183
184 /*********************************************************************
185  *  Device identification routine
186  *
187  *  ixgb_probe determines if the driver should be loaded on an
188  *  adapter based on the PCI vendor/device ID of that adapter.
189  *
190  *  return 0 on success, positive on failure
191  *********************************************************************/
192
193 static int
194 ixgb_probe(device_t dev)
195 {
196         ixgb_vendor_info_t *ent;
197
198         u_int16_t       pci_vendor_id = 0;
199         u_int16_t       pci_device_id = 0;
200         u_int16_t       pci_subvendor_id = 0;
201         u_int16_t       pci_subdevice_id = 0;
202         char            adapter_name[60];
203
204         INIT_DEBUGOUT("ixgb_probe: begin");
205
206         pci_vendor_id = pci_get_vendor(dev);
207         if (pci_vendor_id != IXGB_VENDOR_ID)
208                 return (ENXIO);
209
210         pci_device_id = pci_get_device(dev);
211         pci_subvendor_id = pci_get_subvendor(dev);
212         pci_subdevice_id = pci_get_subdevice(dev);
213
214         ent = ixgb_vendor_info_array;
215         while (ent->vendor_id != 0) {
216                 if ((pci_vendor_id == ent->vendor_id) &&
217                     (pci_device_id == ent->device_id) &&
218
219                     ((pci_subvendor_id == ent->subvendor_id) ||
220                      (ent->subvendor_id == PCI_ANY_ID)) &&
221
222                     ((pci_subdevice_id == ent->subdevice_id) ||
223                      (ent->subdevice_id == PCI_ANY_ID))) {
224                         sprintf(adapter_name, "%s, Version - %s",
225                                 ixgb_strings[ent->index],
226                                 ixgb_driver_version);
227                         device_set_desc_copy(dev, adapter_name);
228                         return (BUS_PROBE_DEFAULT);
229                 }
230                 ent++;
231         }
232
233         return (ENXIO);
234 }
235
236 /*********************************************************************
237  *  Device initialization routine
238  *
239  *  The attach entry point is called when the driver is being loaded.
240  *  This routine identifies the type of hardware, allocates all resources
241  *  and initializes the hardware.
242  *
243  *  return 0 on success, positive on failure
244  *********************************************************************/
245
246 static int
247 ixgb_attach(device_t dev)
248 {
249         struct adapter *adapter;
250         int             tsize, rsize;
251         int             error = 0;
252
253         device_printf(dev, "%s\n", ixgb_copyright);
254         INIT_DEBUGOUT("ixgb_attach: begin");
255
256         /* Allocate, clear, and link in our adapter structure */
257         if (!(adapter = device_get_softc(dev))) {
258                 device_printf(dev, "adapter structure allocation failed\n");
259                 return (ENOMEM);
260         }
261         bzero(adapter, sizeof(struct adapter));
262         adapter->dev = dev;
263         adapter->osdep.dev = dev;
264         IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
265
266         if (ixgb_adapter_list != NULL)
267                 ixgb_adapter_list->prev = adapter;
268         adapter->next = ixgb_adapter_list;
269         ixgb_adapter_list = adapter;
270
271         /* SYSCTL APIs */
272         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
273                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
274                         OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
275                         (void *)adapter, 0,
276                         ixgb_sysctl_stats, "I", "Statistics");
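        /*
         * Registered under the device's sysctl tree, so this appears as the
         * dev.ixgb.<unit>.stats OID; the handler, ixgb_sysctl_stats(), is
         * defined further down in this file.
         */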
277
278         callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
279
280         /* Determine hardware revision */
281         ixgb_identify_hardware(adapter);
282
283         /* Parameters (to be read from user) */
284         adapter->num_tx_desc = IXGB_MAX_TXD;
285         adapter->num_rx_desc = IXGB_MAX_RXD;
286         adapter->tx_int_delay = TIDV;
287         adapter->rx_int_delay = RDTR;
288         adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
289
290         adapter->hw.fc.high_water = FCRTH;
291         adapter->hw.fc.low_water = FCRTL;
292         adapter->hw.fc.pause_time = FCPAUSE;
293         adapter->hw.fc.send_xon = TRUE;
294         adapter->hw.fc.type = FLOW_CONTROL;
295
296
297         /* Set the max frame size assuming standard ethernet sized frames */
298         adapter->hw.max_frame_size =
299                 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
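        /*
         * ETHERMTU (1500) + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518
         * bytes for a standard frame; the SIOCSIFMTU ioctl below recomputes
         * this whenever the MTU is changed.
         */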
300
301         if (ixgb_allocate_pci_resources(adapter)) {
302                 device_printf(dev, "Allocation of PCI resources failed\n");
303                 error = ENXIO;
304                 goto err_pci;
305         }
306         tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
307                              sizeof(struct ixgb_tx_desc), 4096);
308
309         /* Allocate Transmit Descriptor ring */
310         if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
311                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
312                 error = ENOMEM;
313                 goto err_tx_desc;
314         }
315         adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
316
317         rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
318                              sizeof(struct ixgb_rx_desc), 4096);
319
320         /* Allocate Receive Descriptor ring */
321         if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
322                 device_printf(dev, "Unable to allocate rx_desc memory\n");
323                 error = ENOMEM;
324                 goto err_rx_desc;
325         }
326         adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
327
328         /* Allocate multicast array memory. */
329         adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
330             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
331         if (adapter->mta == NULL) {
332                 device_printf(dev, "Can not allocate multicast setup array\n");
333                 error = ENOMEM;
334                 goto err_hw_init;
335         }
336
337         /* Initialize the hardware */
338         if (ixgb_hardware_init(adapter)) {
339                 device_printf(dev, "Unable to initialize the hardware\n");
340                 error = EIO;
341                 goto err_hw_init;
342         }
343         /* Setup OS specific network interface */
344         if (ixgb_setup_interface(dev, adapter) != 0)
345                 goto err_hw_init;
346
347         /* Initialize statistics */
348         ixgb_clear_hw_cntrs(&adapter->hw);
349         ixgb_update_stats_counters(adapter);
350
351         INIT_DEBUGOUT("ixgb_attach: end");
352         return (0);
353
354 err_hw_init:
355         ixgb_dma_free(adapter, &adapter->rxdma);
356 err_rx_desc:
357         ixgb_dma_free(adapter, &adapter->txdma);
358 err_tx_desc:
359 err_pci:
360         if (adapter->ifp != NULL)
361                 if_free(adapter->ifp);
362         ixgb_free_pci_resources(adapter);
363         sysctl_ctx_free(&adapter->sysctl_ctx);
364         free(adapter->mta, M_DEVBUF);
365         return (error);
366
367 }
368
369 /*********************************************************************
370  *  Device removal routine
371  *
372  *  The detach entry point is called when the driver is being removed.
373  *  This routine stops the adapter and deallocates all the resources
374  *  that were allocated for driver operation.
375  *
376  *  return 0 on success, positive on failure
377  *********************************************************************/
378
379 static int
380 ixgb_detach(device_t dev)
381 {
382         struct adapter *adapter = device_get_softc(dev);
383         struct ifnet   *ifp = adapter->ifp;
384
385         INIT_DEBUGOUT("ixgb_detach: begin");
386
387 #ifdef DEVICE_POLLING
388         if (ifp->if_capenable & IFCAP_POLLING)
389                 ether_poll_deregister(ifp);
390 #endif
391
392         IXGB_LOCK(adapter);
393         adapter->in_detach = 1;
394
395         ixgb_stop(adapter);
396         IXGB_UNLOCK(adapter);
397
398 #if __FreeBSD_version < 500000
399         ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
400 #else
401         ether_ifdetach(ifp);
402 #endif
403         callout_drain(&adapter->timer);
404         ixgb_free_pci_resources(adapter);
405 #if __FreeBSD_version >= 500000
406         if_free(ifp);
407 #endif
408
409         /* Free Transmit Descriptor ring */
410         if (adapter->tx_desc_base) {
411                 ixgb_dma_free(adapter, &adapter->txdma);
412                 adapter->tx_desc_base = NULL;
413         }
414         /* Free Receive Descriptor ring */
415         if (adapter->rx_desc_base) {
416                 ixgb_dma_free(adapter, &adapter->rxdma);
417                 adapter->rx_desc_base = NULL;
418         }
419         /* Remove from the adapter list */
420         if (ixgb_adapter_list == adapter)
421                 ixgb_adapter_list = adapter->next;
422         if (adapter->next != NULL)
423                 adapter->next->prev = adapter->prev;
424         if (adapter->prev != NULL)
425                 adapter->prev->next = adapter->next;
426         free(adapter->mta, M_DEVBUF);
427
428         IXGB_LOCK_DESTROY(adapter);
429         return (0);
430 }
431
432 /*********************************************************************
433  *
434  *  Shutdown entry point
435  *
436  **********************************************************************/
437
438 static int
439 ixgb_shutdown(device_t dev)
440 {
441         struct adapter *adapter = device_get_softc(dev);
442         IXGB_LOCK(adapter);
443         ixgb_stop(adapter);
444         IXGB_UNLOCK(adapter);
445         return (0);
446 }
447
448
449 /*********************************************************************
450  *  Transmit entry point
451  *
452  *  ixgb_start is called by the stack to initiate a transmit.
453  *  The driver will remain in this routine as long as there are
454  *  packets to transmit and transmit resources are available.
455  *  In case resources are not available, the stack is notified
456  *  and the packet is requeued.
457  **********************************************************************/
458
459 static void
460 ixgb_start_locked(struct ifnet * ifp)
461 {
462         struct mbuf    *m_head;
463         struct adapter *adapter = ifp->if_softc;
464
465         IXGB_LOCK_ASSERT(adapter);
466
467         if (!adapter->link_active)
468                 return;
469
470         while (ifp->if_snd.ifq_head != NULL) {
471                 IF_DEQUEUE(&ifp->if_snd, m_head);
472
473                 if (m_head == NULL)
474                         break;
475
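                /*
                 * If ixgb_encap() cannot obtain descriptors or a DMA map,
                 * mark the interface busy, push the packet back onto the
                 * send queue and stop; the interrupt path calls
                 * ixgb_start_locked() again after descriptors are cleaned.
                 */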
476                 if (ixgb_encap(adapter, m_head)) {
477                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
478                         IF_PREPEND(&ifp->if_snd, m_head);
479                         break;
480                 }
481                 /* Send a copy of the frame to the BPF listener */
482 #if __FreeBSD_version < 500000
483                 if (ifp->if_bpf)
484                         bpf_mtap(ifp, m_head);
485 #else
486                 ETHER_BPF_MTAP(ifp, m_head);
487 #endif
488                 /* Set timeout in case hardware has problems transmitting */
489                 adapter->tx_timer = IXGB_TX_TIMEOUT;
490
491         }
492         return;
493 }
494
495 static void
496 ixgb_start(struct ifnet *ifp)
497 {
498         struct adapter *adapter = ifp->if_softc;
499
500         IXGB_LOCK(adapter);
501         ixgb_start_locked(ifp);
502         IXGB_UNLOCK(adapter);
503         return;
504 }
505
506 /*********************************************************************
507  *  Ioctl entry point
508  *
509  *  ixgb_ioctl is called when the user wants to configure the
510  *  interface.
511  *
512  *  return 0 on success, positive on failure
513  **********************************************************************/
514
515 static int
516 ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
517 {
518         int             mask, error = 0;
519         struct ifreq   *ifr = (struct ifreq *) data;
520         struct adapter *adapter = ifp->if_softc;
521
522         if (adapter->in_detach)
523                 goto out;
524
525         switch (command) {
526         case SIOCSIFADDR:
527         case SIOCGIFADDR:
528                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
529                 ether_ioctl(ifp, command, data);
530                 break;
531         case SIOCSIFMTU:
532                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
533                 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
534                         error = EINVAL;
535                 } else {
536                         IXGB_LOCK(adapter);
537                         ifp->if_mtu = ifr->ifr_mtu;
538                         adapter->hw.max_frame_size =
539                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
540
541                         ixgb_init_locked(adapter);
542                         IXGB_UNLOCK(adapter);
543                 }
544                 break;
545         case SIOCSIFFLAGS:
546                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
547                 IXGB_LOCK(adapter);
548                 if (ifp->if_flags & IFF_UP) {
549                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
550                                 ixgb_init_locked(adapter);
551                         }
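                        /*
                         * Clear the promiscuous bits and then re-apply them
                         * from the current if_flags so that changes to
                         * IFF_PROMISC / IFF_ALLMULTI take effect here.
                         */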
552                         ixgb_disable_promisc(adapter);
553                         ixgb_set_promisc(adapter);
554                 } else {
555                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
556                                 ixgb_stop(adapter);
557                         }
558                 }
559                 IXGB_UNLOCK(adapter);
560                 break;
561         case SIOCADDMULTI:
562         case SIOCDELMULTI:
563                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
564                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
565                         IXGB_LOCK(adapter);
566                         ixgb_disable_intr(adapter);
567                         ixgb_set_multi(adapter);
568                         ixgb_enable_intr(adapter);
569                         IXGB_UNLOCK(adapter);
570                 }
571                 break;
572         case SIOCSIFMEDIA:
573         case SIOCGIFMEDIA:
574                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
575                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
576                 break;
577         case SIOCSIFCAP:
578                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
579                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
580 #ifdef DEVICE_POLLING
581                 if (mask & IFCAP_POLLING) {
582                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
583                                 error = ether_poll_register(ixgb_poll, ifp);
584                                 if (error)
585                                         return(error);
586                                 IXGB_LOCK(adapter);
587                                 ixgb_disable_intr(adapter);
588                                 ifp->if_capenable |= IFCAP_POLLING;
589                                 IXGB_UNLOCK(adapter);
590                         } else {
591                                 error = ether_poll_deregister(ifp);
592                                 /* Enable interrupt even in error case */
593                                 IXGB_LOCK(adapter);
594                                 ixgb_enable_intr(adapter);
595                                 ifp->if_capenable &= ~IFCAP_POLLING;
596                                 IXGB_UNLOCK(adapter);
597                         }
598                 }
599 #endif /* DEVICE_POLLING */
600                 if (mask & IFCAP_HWCSUM) {
601                         if (IFCAP_HWCSUM & ifp->if_capenable)
602                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
603                         else
604                                 ifp->if_capenable |= IFCAP_HWCSUM;
605                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
606                                 ixgb_init(adapter);
607                 }
608                 break;
609         default:
610                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
611                 error = EINVAL;
612         }
613
614 out:
615         return (error);
616 }
617
618 /*********************************************************************
619  *  Watchdog entry point
620  *
621  *  This routine is called whenever hardware quits transmitting.
622  *
623  **********************************************************************/
624
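/*
 * Called from ixgb_local_timer() when adapter->tx_timer, armed in
 * ixgb_start_locked(), counts down to zero.
 */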
625 static void
626 ixgb_watchdog(struct adapter *adapter)
627 {
628         struct ifnet *ifp;
629
630         ifp = adapter->ifp;
631
632         /*
633          * If we are in this routine because of pause frames, then don't
634          * reset the hardware.
635          */
636         if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
637                 adapter->tx_timer = IXGB_TX_TIMEOUT;
638                 return;
639         }
640         if_printf(ifp, "watchdog timeout -- resetting\n");
641
642         ixgb_stop(adapter);
643         ixgb_init_locked(adapter);
644
645
646         ifp->if_oerrors++;
647
648         return;
649 }
650
651 /*********************************************************************
652  *  Init entry point
653  *
654  *  This routine is used in two ways. It is used by the stack as
655  *  the init entry point in the network interface structure. It is also used
656  *  by the driver as a hw/sw initialization routine to get to a
657  *  consistent state.
658  *
660  **********************************************************************/
661
662 static void
663 ixgb_init_locked(struct adapter *adapter)
664 {
665         struct ifnet   *ifp;
666
667         INIT_DEBUGOUT("ixgb_init: begin");
668
669         IXGB_LOCK_ASSERT(adapter);
670
671         ixgb_stop(adapter);
672         ifp = adapter->ifp;
673
674         /* Get the latest mac address, User can use a LAA */
675         bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
676             IXGB_ETH_LENGTH_OF_ADDRESS);
677
678         /* Initialize the hardware */
679         if (ixgb_hardware_init(adapter)) {
680                 if_printf(ifp, "Unable to initialize the hardware\n");
681                 return;
682         }
683         ixgb_enable_vlans(adapter);
684
685         /* Prepare transmit descriptors and buffers */
686         if (ixgb_setup_transmit_structures(adapter)) {
687                 if_printf(ifp, "Could not setup transmit structures\n");
688                 ixgb_stop(adapter);
689                 return;
690         }
691         ixgb_initialize_transmit_unit(adapter);
692
693         /* Setup Multicast table */
694         ixgb_set_multi(adapter);
695
696         /* Prepare receive descriptors and buffers */
697         if (ixgb_setup_receive_structures(adapter)) {
698                 if_printf(ifp, "Could not setup receive structures\n");
699                 ixgb_stop(adapter);
700                 return;
701         }
702         ixgb_initialize_receive_unit(adapter);
703
704         /* Don't lose promiscuous settings */
705         ixgb_set_promisc(adapter);
706
707         ifp = adapter->ifp;
708         ifp->if_drv_flags |= IFF_DRV_RUNNING;
709         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
710
711
712         if (ifp->if_capenable & IFCAP_TXCSUM)
713                 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
714         else
715                 ifp->if_hwassist = 0;
716
717
718         /* Enable jumbo frames */
719         if (ifp->if_mtu > ETHERMTU) {
720                 uint32_t        temp_reg;
721                 IXGB_WRITE_REG(&adapter->hw, MFS,
722                                adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
723                 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
724                 temp_reg |= IXGB_CTRL0_JFE;
725                 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
726         }
727         callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
728         ixgb_clear_hw_cntrs(&adapter->hw);
729 #ifdef DEVICE_POLLING
730         /*
731          * Only disable interrupts if we are polling, make sure they are on
732          * otherwise.
733          */
734         if (ifp->if_capenable & IFCAP_POLLING)
735                 ixgb_disable_intr(adapter);
736         else
737 #endif
738                 ixgb_enable_intr(adapter);
739
740         return;
741 }
742
743 static void
744 ixgb_init(void *arg)
745 {
746         struct adapter *adapter = arg;
747
748         IXGB_LOCK(adapter);
749         ixgb_init_locked(adapter);
750         IXGB_UNLOCK(adapter);
751         return;
752 }
753
754 #ifdef DEVICE_POLLING
755 static int
756 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
757 {
758         struct adapter *adapter = ifp->if_softc;
759         u_int32_t       reg_icr;
760         int             rx_npkts;
761
762         IXGB_LOCK_ASSERT(adapter);
763
764         if (cmd == POLL_AND_CHECK_STATUS) {
765                 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
766                 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
767                         ixgb_check_for_link(&adapter->hw);
768                         ixgb_print_link_status(adapter);
769                 }
770         }
771         rx_npkts = ixgb_process_receive_interrupts(adapter, count);
772         ixgb_clean_transmit_interrupts(adapter);
773
774         if (ifp->if_snd.ifq_head != NULL)
775                 ixgb_start_locked(ifp);
776         return (rx_npkts);
777 }
778
779 static int
780 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
781 {
782         struct adapter *adapter = ifp->if_softc;
783         int rx_npkts = 0;
784
785         IXGB_LOCK(adapter);
786         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
787                 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
788         IXGB_UNLOCK(adapter);
789         return (rx_npkts);
790 }
791 #endif /* DEVICE_POLLING */
792
793 /*********************************************************************
794  *
795  *  Interrupt Service routine
796  *
797  **********************************************************************/
798
799 static void
800 ixgb_intr(void *arg)
801 {
802         u_int32_t       loop_cnt = IXGB_MAX_INTR;
803         u_int32_t       reg_icr;
804         struct ifnet   *ifp;
805         struct adapter *adapter = arg;
806         boolean_t       rxdmt0 = FALSE;
807
808         IXGB_LOCK(adapter);
809
810         ifp = adapter->ifp;
811
812 #ifdef DEVICE_POLLING
813         if (ifp->if_capenable & IFCAP_POLLING) {
814                 IXGB_UNLOCK(adapter);
815                 return;
816         }
817 #endif
818
819         reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
820         if (reg_icr == 0) {
821                 IXGB_UNLOCK(adapter);
822                 return;
823         }
824
825         if (reg_icr & IXGB_INT_RXDMT0)
826                 rxdmt0 = TRUE;
827
828 #ifdef _SV_
829         if (reg_icr & IXGB_INT_RXDMT0)
830                 adapter->sv_stats.icr_rxdmt0++;
831         if (reg_icr & IXGB_INT_RXO)
832                 adapter->sv_stats.icr_rxo++;
833         if (reg_icr & IXGB_INT_RXT0)
834                 adapter->sv_stats.icr_rxt0++;
835         if (reg_icr & IXGB_INT_TXDW)
836                 adapter->sv_stats.icr_TXDW++;
837 #endif                          /* _SV_ */
838
839         /* Link status change */
840         if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
841                 ixgb_check_for_link(&adapter->hw);
842                 ixgb_print_link_status(adapter);
843         }
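        /*
         * Make IXGB_MAX_INTR passes over the RX and TX rings so that a
         * burst of completed work is drained within a single interrupt.
         */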
844         while (loop_cnt > 0) {
845                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
846                         ixgb_process_receive_interrupts(adapter, -1);
847                         ixgb_clean_transmit_interrupts(adapter);
848                 }
849                 loop_cnt--;
850         }
851
852         if (rxdmt0 && adapter->raidc) {
853                 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
854                 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
855         }
856         if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
857                 ixgb_start_locked(ifp);
858
859         IXGB_UNLOCK(adapter);
860         return;
861 }
862
863
864 /*********************************************************************
865  *
866  *  Media Ioctl callback
867  *
868  *  This routine is called whenever the user queries the status of
869  *  the interface using ifconfig.
870  *
871  **********************************************************************/
872 static void
873 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
874 {
875         struct adapter *adapter = ifp->if_softc;
876
877         INIT_DEBUGOUT("ixgb_media_status: begin");
878
879         ixgb_check_for_link(&adapter->hw);
880         ixgb_print_link_status(adapter);
881
882         ifmr->ifm_status = IFM_AVALID;
883         ifmr->ifm_active = IFM_ETHER;
884
885         if (!adapter->hw.link_up)
886                 return;
887
888         ifmr->ifm_status |= IFM_ACTIVE;
889         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
890
891         return;
892 }
893
894 /*********************************************************************
895  *
896  *  Media Ioctl callback
897  *
898  *  This routine is called when the user changes speed/duplex using
899  *  media/mediaopt option with ifconfig.
900  *
901  **********************************************************************/
902 static int
903 ixgb_media_change(struct ifnet * ifp)
904 {
905         struct adapter *adapter = ifp->if_softc;
906         struct ifmedia *ifm = &adapter->media;
907
908         INIT_DEBUGOUT("ixgb_media_change: begin");
909
910         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
911                 return (EINVAL);
912
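        /*
         * The 82597EX link is fixed at 10Gb/s full duplex, so there is no
         * speed/duplex setting to reprogram; any valid Ethernet media
         * request is simply accepted.
         */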
913         return (0);
914 }
915
916 /*********************************************************************
917  *
918  *  This routine maps the mbufs to tx descriptors.
919  *
920  *  return 0 on success, positive on failure
921  **********************************************************************/
922
923 static int
924 ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
925 {
926         u_int8_t        txd_popts;
927         int             i, j, error, nsegs;
928
929 #if __FreeBSD_version < 500000
930         struct ifvlan  *ifv = NULL;
#elif __FreeBSD_version < 700000
        struct m_tag   *mtag = NULL;    /* VLAN tag from VLAN_OUTPUT_TAG() below */
931 #endif
932         bus_dma_segment_t segs[IXGB_MAX_SCATTER];
933         bus_dmamap_t    map;
934         struct ixgb_buffer *tx_buffer = NULL;
935         struct ixgb_tx_desc *current_tx_desc = NULL;
936         struct ifnet   *ifp = adapter->ifp;
937
938         /*
939          * Force a cleanup if number of TX descriptors available hits the
940          * threshold
941          */
942         if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
943                 ixgb_clean_transmit_interrupts(adapter);
944         }
945         if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
946                 adapter->no_tx_desc_avail1++;
947                 return (ENOBUFS);
948         }
949         /*
950          * Map the packet for DMA.
951          */
952         if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
953                 adapter->no_tx_map_avail++;
954                 return (ENOMEM);
955         }
956         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
957                                         &nsegs, BUS_DMA_NOWAIT);
958         if (error != 0) {
959                 adapter->no_tx_dma_setup++;
960                 if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
961                        "error %u\n", error);
962                 bus_dmamap_destroy(adapter->txtag, map);
963                 return (error);
964         }
965         KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
966
967         if (nsegs > adapter->num_tx_desc_avail) {
968                 adapter->no_tx_desc_avail2++;
969                 bus_dmamap_destroy(adapter->txtag, map);
970                 return (ENOBUFS);
971         }
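        /*
         * txd_popts collects the per-packet offload bits that
         * ixgb_transmit_checksum_setup() selects when hardware checksumming
         * (if_hwassist) is enabled; zero requests no offload.
         */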
972         if (ifp->if_hwassist > 0) {
973                 ixgb_transmit_checksum_setup(adapter, m_head,
974                                              &txd_popts);
975         } else
976                 txd_popts = 0;
977
978         /* Find out if we are in vlan mode */
979 #if __FreeBSD_version < 500000
980         if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
981             m_head->m_pkthdr.rcvif != NULL &&
982             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
983                 ifv = m_head->m_pkthdr.rcvif->if_softc;
984 #elif __FreeBSD_version < 700000
985         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
986 #endif
987         i = adapter->next_avail_tx_desc;
988         for (j = 0; j < nsegs; j++) {
989                 tx_buffer = &adapter->tx_buffer_area[i];
990                 current_tx_desc = &adapter->tx_desc_base[i];
991
992                 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
993                 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
994                 current_tx_desc->popts = txd_popts;
995                 if (++i == adapter->num_tx_desc)
996                         i = 0;
997
998                 tx_buffer->m_head = NULL;
999         }
1000
1001         adapter->num_tx_desc_avail -= nsegs;
1002         adapter->next_avail_tx_desc = i;
1003
1004 #if __FreeBSD_version < 500000
1005         if (ifv != NULL) {
1006                 /* Set the vlan id */
1007                 current_tx_desc->vlan = ifv->ifv_tag;
1008 #elif __FreeBSD_version < 700000
1009         if (mtag != NULL) {
1010                 /* Set the vlan id */
1011                 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1012 #else
1013         if (m_head->m_flags & M_VLANTAG) {
1014                 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1015 #endif
1016
1017                 /* Tell hardware to add tag */
1018                 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1019         }
1020         tx_buffer->m_head = m_head;
1021         tx_buffer->map = map;
1022         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1023
1024         /*
1025          * Last Descriptor of Packet needs End Of Packet (EOP)
1026          */
1027         current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1028
1029         /*
1030          * Advance the Transmit Descriptor Tail (TDT); this tells the
1031          * hardware that this frame is available to transmit.
1032          */
1033         IXGB_WRITE_REG(&adapter->hw, TDT, i);
1034
1035         return (0);
1036 }
1037
1038 static void
1039 ixgb_set_promisc(struct adapter * adapter)
1040 {
1041
1042         u_int32_t       reg_rctl;
1043         struct ifnet   *ifp = adapter->ifp;
1044
1045         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1046
1047         if (ifp->if_flags & IFF_PROMISC) {
1048                 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1049                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1050         } else if (ifp->if_flags & IFF_ALLMULTI) {
1051                 reg_rctl |= IXGB_RCTL_MPE;
1052                 reg_rctl &= ~IXGB_RCTL_UPE;
1053                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1054         }
1055         return;
1056 }
1057
1058 static void
1059 ixgb_disable_promisc(struct adapter * adapter)
1060 {
1061         u_int32_t       reg_rctl;
1062
1063         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1064
1065         reg_rctl &= (~IXGB_RCTL_UPE);
1066         reg_rctl &= (~IXGB_RCTL_MPE);
1067         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1068
1069         return;
1070 }
1071
1072
1073 /*********************************************************************
1074  *  Multicast Update
1075  *
1076  *  This routine is called whenever the multicast address list is updated.
1077  *
1078  **********************************************************************/
1079
1080 static void
1081 ixgb_set_multi(struct adapter * adapter)
1082 {
1083         u_int32_t       reg_rctl = 0;
1084         u_int8_t        *mta;
1085         struct ifmultiaddr *ifma;
1086         int             mcnt = 0;
1087         struct ifnet   *ifp = adapter->ifp;
1088
1089         IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1090
1091         mta = adapter->mta;
1092         bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1093             MAX_NUM_MULTICAST_ADDRESSES);
1094
1095         if_maddr_rlock(ifp);
1096 #if __FreeBSD_version < 500000
1097         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1098 #else
1099         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1100 #endif
1101                 if (ifma->ifma_addr->sa_family != AF_LINK)
1102                         continue;
1103
                /* Don't overrun the mta[] staging buffer allocated in attach */
                if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
                        break;
1104                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1105                       &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1106                 mcnt++;
1107         }
1108         if_maddr_runlock(ifp);
1109
1110         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1111                 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1112                 reg_rctl |= IXGB_RCTL_MPE;
1113                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1114         } else
1115                 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1116
1117         return;
1118 }
1119
1120
1121 /*********************************************************************
1122  *  Timer routine
1123  *
1124  *  This routine checks for link status and updates statistics.
1125  *
1126  **********************************************************************/
1127
1128 static void
1129 ixgb_local_timer(void *arg)
1130 {
1131         struct ifnet   *ifp;
1132         struct adapter *adapter = arg;
1133         ifp = adapter->ifp;
1134
1135         IXGB_LOCK_ASSERT(adapter);
1136
1137         ixgb_check_for_link(&adapter->hw);
1138         ixgb_print_link_status(adapter);
1139         ixgb_update_stats_counters(adapter);
1140         if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1141                 ixgb_print_hw_stats(adapter);
1142         }
1143         if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1144                 ixgb_watchdog(adapter);
1145         callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
1146 }
1147
1148 static void
1149 ixgb_print_link_status(struct adapter * adapter)
1150 {
1151         if (adapter->hw.link_up) {
1152                 if (!adapter->link_active) {
1153                         if_printf(adapter->ifp, "Link is up %d Mbps %s\n",
1154                                10000,
1155                                "Full Duplex");
1156                         adapter->link_active = 1;
1157                 }
1158         } else {
1159                 if (adapter->link_active) {
1160                         if_printf(adapter->ifp, "Link is Down\n");
1161                         adapter->link_active = 0;
1162                 }
1163         }
1164
1165         return;
1166 }
1167
1168
1169
1170 /*********************************************************************
1171  *
1172  *  This routine disables all traffic on the adapter by issuing a
1173  *  global reset on the MAC and deallocates TX/RX buffers.
1174  *
1175  **********************************************************************/
1176
1177 static void
1178 ixgb_stop(void *arg)
1179 {
1180         struct ifnet   *ifp;
1181         struct adapter *adapter = arg;
1182         ifp = adapter->ifp;
1183
1184         IXGB_LOCK_ASSERT(adapter);
1185
1186         INIT_DEBUGOUT("ixgb_stop: begin\n");
1187         ixgb_disable_intr(adapter);
1188         adapter->hw.adapter_stopped = FALSE;
1189         ixgb_adapter_stop(&adapter->hw);
1190         callout_stop(&adapter->timer);
1191         ixgb_free_transmit_structures(adapter);
1192         ixgb_free_receive_structures(adapter);
1193
1194         /* Tell the stack that the interface is no longer active */
1195         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1196         adapter->tx_timer = 0;
1197
1198         return;
1199 }
1200
1201
1202 /*********************************************************************
1203  *
1204  *  Determine hardware revision.
1205  *
1206  **********************************************************************/
1207 static void
1208 ixgb_identify_hardware(struct adapter * adapter)
1209 {
1210         device_t        dev = adapter->dev;
1211
1212         /* Make sure our PCI config space has the necessary stuff set */
1213         pci_enable_busmaster(dev);
1214         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1215
1216         /* Save off the information about this board */
1217         adapter->hw.vendor_id = pci_get_vendor(dev);
1218         adapter->hw.device_id = pci_get_device(dev);
1219         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1220         adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1221         adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1222
1223         /* Set MacType, etc. based on this PCI info */
1224         switch (adapter->hw.device_id) {
1225         case IXGB_DEVICE_ID_82597EX:
1226         case IXGB_DEVICE_ID_82597EX_SR:
1227                 adapter->hw.mac_type = ixgb_82597;
1228                 break;
1229         default:
1230                 INIT_DEBUGOUT1("Unknown device id 0x%x", adapter->hw.device_id);
1231                 device_printf(dev, "unsupported device id 0x%x\n",
1232                     adapter->hw.device_id);
1233         }
1234
1235         return;
1236 }
1237
1238 static int
1239 ixgb_allocate_pci_resources(struct adapter * adapter)
1240 {
1241         int             rid;
1242         device_t        dev = adapter->dev;
1243
1244         rid = IXGB_MMBA;
1245         adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
1246                                                  &rid, 0, ~0, 1,
1247                                                  RF_ACTIVE);
1248         if (!(adapter->res_memory)) {
1249                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1250                 return (ENXIO);
1251         }
1252         adapter->osdep.mem_bus_space_tag =
1253                 rman_get_bustag(adapter->res_memory);
1254         adapter->osdep.mem_bus_space_handle =
1255                 rman_get_bushandle(adapter->res_memory);
1256         adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1257
1258         rid = 0x0;
1259         adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
1260                                                     &rid, 0, ~0, 1,
1261                                                   RF_SHAREABLE | RF_ACTIVE);
1262         if (!(adapter->res_interrupt)) {
1263                 device_printf(dev,
1264                     "Unable to allocate bus resource: interrupt\n");
1265                 return (ENXIO);
1266         }
1267         if (bus_setup_intr(dev, adapter->res_interrupt,
1268                            INTR_TYPE_NET | INTR_MPSAFE,
1269                            NULL, (void (*) (void *))ixgb_intr, adapter,
1270                            &adapter->int_handler_tag)) {
1271                 device_printf(dev, "Error registering interrupt handler!\n");
1272                 return (ENXIO);
1273         }
1274         adapter->hw.back = &adapter->osdep;
1275
1276         return (0);
1277 }
1278
1279 static void
1280 ixgb_free_pci_resources(struct adapter * adapter)
1281 {
1282         device_t        dev = adapter->dev;
1283
1284         if (adapter->res_interrupt != NULL) {
1285                 bus_teardown_intr(dev, adapter->res_interrupt,
1286                                   adapter->int_handler_tag);
1287                 bus_release_resource(dev, SYS_RES_IRQ, 0,
1288                                      adapter->res_interrupt);
1289         }
1290         if (adapter->res_memory != NULL) {
1291                 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1292                                      adapter->res_memory);
1293         }
1294         if (adapter->res_ioport != NULL) {
1295                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1296                                      adapter->res_ioport);
1297         }
1298         return;
1299 }
1300
1301 /*********************************************************************
1302  *
1303  *  Initialize the hardware to a configuration as specified by the
1304  *  adapter structure. The controller is reset, the EEPROM is
1305  *  verified, the MAC address is set, then the shared initialization
1306  *  routines are called.
1307  *
1308  **********************************************************************/
1309 static int
1310 ixgb_hardware_init(struct adapter * adapter)
1311 {
1312         /* Issue a global reset */
1313         adapter->hw.adapter_stopped = FALSE;
1314         ixgb_adapter_stop(&adapter->hw);
1315
1316         /* Make sure we have a good EEPROM before we read from it */
1317         if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1318                 device_printf(adapter->dev,
1319                     "The EEPROM Checksum Is Not Valid\n");
1320                 return (EIO);
1321         }
1322         if (!ixgb_init_hw(&adapter->hw)) {
1323                 device_printf(adapter->dev, "Hardware Initialization Failed\n");
1324                 return (EIO);
1325         }
1326
1327         return (0);
1328 }
1329
1330 /*********************************************************************
1331  *
1332  *  Setup networking device structure and register an interface.
1333  *
1334  **********************************************************************/
1335 static int
1336 ixgb_setup_interface(device_t dev, struct adapter * adapter)
1337 {
1338         struct ifnet   *ifp;
1339         INIT_DEBUGOUT("ixgb_setup_interface: begin");
1340
1341         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1342         if (ifp == NULL) {
1343                 device_printf(dev, "can not allocate ifnet structure\n");
1344                 return (-1);
1345         }
1346 #if __FreeBSD_version >= 502000
1347         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1348 #else
1349         ifp->if_unit = device_get_unit(dev);
1350         ifp->if_name = "ixgb";
1351 #endif
1352         ifp->if_baudrate = 1000000000;
1353         ifp->if_init = ixgb_init;
1354         ifp->if_softc = adapter;
1355         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1356         ifp->if_ioctl = ixgb_ioctl;
1357         ifp->if_start = ixgb_start;
1358         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1359
1360 #if __FreeBSD_version < 500000
1361         ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1362 #else
1363         ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1364 #endif
1365
1366         ifp->if_capabilities = IFCAP_HWCSUM;
1367
1368         /*
1369          * Tell the upper layer(s) we support long frames.
1370          */
1371         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1372
1373 #if __FreeBSD_version >= 500000
1374         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1375 #endif
1376
1377         ifp->if_capenable = ifp->if_capabilities;
1378
1379 #ifdef DEVICE_POLLING
1380         ifp->if_capabilities |= IFCAP_POLLING;
1381 #endif
1382
1383         /*
1384          * Specify the media types supported by this adapter and register
1385          * callbacks to update media and link information
1386          */
1387         ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1388                      ixgb_media_status);
1389         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1390                     0, NULL);
1391         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1392                     0, NULL);
1393         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1394         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1395
1396         return (0);
1397 }
1398
1399 /********************************************************************
1400  * Manage DMA'able memory.
1401  *******************************************************************/
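/*
 * ixgb_dma_malloc()/ixgb_dma_free() wrap the usual busdma sequence (tag
 * create, allocate, load) for one contiguous region; the attach path uses
 * them for the descriptor rings, e.g.
 * ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT).
 */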
1402 static void
1403 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1404 {
1405         if (error)
1406                 return;
1407         *(bus_addr_t *) arg = segs->ds_addr;
1408         return;
1409 }
1410
1411 static int
1412 ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1413                 struct ixgb_dma_alloc * dma, int mapflags)
1414 {
1415         device_t dev;
1416         int             r;
1417
1418         dev = adapter->dev;
1419         r = bus_dma_tag_create(bus_get_dma_tag(dev),    /* parent */
1420                                PAGE_SIZE, 0,    /* alignment, bounds */
1421                                BUS_SPACE_MAXADDR,       /* lowaddr */
1422                                BUS_SPACE_MAXADDR,       /* highaddr */
1423                                NULL, NULL,      /* filter, filterarg */
1424                                size,    /* maxsize */
1425                                1,       /* nsegments */
1426                                size,    /* maxsegsize */
1427                                BUS_DMA_ALLOCNOW,        /* flags */
1428 #if __FreeBSD_version >= 502000
1429                                NULL,    /* lockfunc */
1430                                NULL,    /* lockfuncarg */
1431 #endif
1432                                &dma->dma_tag);
1433         if (r != 0) {
1434                 device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1435                        "error %u\n", r);
1436                 goto fail_0;
1437         }
1438         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1439                              BUS_DMA_NOWAIT, &dma->dma_map);
1440         if (r != 0) {
1441                 device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1442                        "error %u\n", r);
1443                 goto fail_1;
1444         }
1445         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1446                             size,
1447                             ixgb_dmamap_cb,
1448                             &dma->dma_paddr,
1449                             mapflags | BUS_DMA_NOWAIT);
1450         if (r != 0) {
1451                 device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1452                        "error %u\n", r);
1453                 goto fail_2;
1454         }
1455         dma->dma_size = size;
1456         return (0);
1457 fail_2:
1458         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1459 fail_1:
1460         bus_dma_tag_destroy(dma->dma_tag);
1461 fail_0:
1462         dma->dma_map = NULL;
1463         dma->dma_tag = NULL;
1464         return (r);
1465 }
1466
1467
1468
1469 static void
1470 ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1471 {
1472         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1473         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1474         bus_dma_tag_destroy(dma->dma_tag);
1475 }
1476
1477 /*********************************************************************
1478  *
1479  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1480  *  the information needed to transmit a packet on the wire.
1481  *
1482  **********************************************************************/
1483 static int
1484 ixgb_allocate_transmit_structures(struct adapter * adapter)
1485 {
1486         if (!(adapter->tx_buffer_area =
1487               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1488                                             adapter->num_tx_desc, M_DEVBUF,
1489                                             M_NOWAIT | M_ZERO))) {
1490                 device_printf(adapter->dev,
1491                     "Unable to allocate tx_buffer memory\n");
1492                 return ENOMEM;
1493         }
1494         bzero(adapter->tx_buffer_area,
1495               sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1496
1497         return 0;
1498 }
1499
1500 /*********************************************************************
1501  *
1502  *  Allocate and initialize transmit structures.
1503  *
1504  **********************************************************************/
1505 static int
1506 ixgb_setup_transmit_structures(struct adapter * adapter)
1507 {
1508         /*
1509          * Create the DMA tag used to map transmit mbuf chains.
1510          */
1511         if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
1512                                PAGE_SIZE, 0,    /* alignment, bounds */
1513                                BUS_SPACE_MAXADDR,       /* lowaddr */
1514                                BUS_SPACE_MAXADDR,       /* highaddr */
1515                                NULL, NULL,      /* filter, filterarg */
1516                                MCLBYTES * IXGB_MAX_SCATTER,     /* maxsize */
1517                                IXGB_MAX_SCATTER,        /* nsegments */
1518                                MCLBYTES,        /* maxsegsize */
1519                                BUS_DMA_ALLOCNOW,        /* flags */
1520 #if __FreeBSD_version >= 502000
1521                                NULL,    /* lockfunc */
1522                                NULL,    /* lockfuncarg */
1523 #endif
1524                                &adapter->txtag)) {
1525                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1526                 return (ENOMEM);
1527         }
1528         if (ixgb_allocate_transmit_structures(adapter))
1529                 return ENOMEM;
1530
1531         bzero((void *)adapter->tx_desc_base,
1532               (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
1533
1534         adapter->next_avail_tx_desc = 0;
1535         adapter->oldest_used_tx_desc = 0;
1536
1537         /* Set number of descriptors available */
1538         adapter->num_tx_desc_avail = adapter->num_tx_desc;
1539
1540         /* Set checksum context */
1541         adapter->active_checksum_context = OFFLOAD_NONE;
1542
1543         return 0;
1544 }
1545
1546 /*********************************************************************
1547  *
1548  *  Enable transmit unit.
1549  *
1550  **********************************************************************/
1551 static void
1552 ixgb_initialize_transmit_unit(struct adapter * adapter)
1553 {
1554         u_int32_t       reg_tctl;
1555         u_int64_t       tdba = adapter->txdma.dma_paddr;
1556
1557         /* Setup the Base and Length of the Tx Descriptor Ring */
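        /*
         * The ring's 64-bit physical base address is split across TDBAL
         * (low 32 bits) and TDBAH (high 32 bits); TDLEN is the ring length
         * in bytes.
         */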
1558         IXGB_WRITE_REG(&adapter->hw, TDBAL,
1559                        (tdba & 0x00000000ffffffffULL));
1560         IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1561         IXGB_WRITE_REG(&adapter->hw, TDLEN,
1562                        adapter->num_tx_desc *
1563                        sizeof(struct ixgb_tx_desc));
1564
1565         /* Setup the HW Tx Head and Tail descriptor pointers */
1566         IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1567         IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1568
1569
1570         HW_DEBUGOUT2("Base = %x, Length = %x\n",
1571                      IXGB_READ_REG(&adapter->hw, TDBAL),
1572                      IXGB_READ_REG(&adapter->hw, TDLEN));
1573
1574         IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1575
1576
1577         /* Program the Transmit Control Register */
1578         /* The prior TCTL contents are not preserved; write the control bits outright */
1579         reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1580         IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1581
1582         /* Setup Transmit Descriptor Settings for this adapter */
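        /*
         * txd_cmd is OR'ed into every transmit descriptor: RS requests a
         * status write-back (DD) when the descriptor completes, and IDE,
         * added below when a delay is configured, enables the transmit
         * interrupt delay (TIDV).
         */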
1583         adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1584
1585         if (adapter->tx_int_delay > 0)
1586                 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1587         return;
1588 }
1589
1590 /*********************************************************************
1591  *
1592  *  Free all transmit related data structures.
1593  *
1594  **********************************************************************/
1595 static void
1596 ixgb_free_transmit_structures(struct adapter * adapter)
1597 {
1598         struct ixgb_buffer *tx_buffer;
1599         int             i;
1600
1601         INIT_DEBUGOUT("free_transmit_structures: begin");
1602
1603         if (adapter->tx_buffer_area != NULL) {
1604                 tx_buffer = adapter->tx_buffer_area;
1605                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1606                         if (tx_buffer->m_head != NULL) {
1607                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1608                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1609                                 m_freem(tx_buffer->m_head);
1610                         }
1611                         tx_buffer->m_head = NULL;
1612                 }
1613         }
1614         if (adapter->tx_buffer_area != NULL) {
1615                 free(adapter->tx_buffer_area, M_DEVBUF);
1616                 adapter->tx_buffer_area = NULL;
1617         }
1618         if (adapter->txtag != NULL) {
1619                 bus_dma_tag_destroy(adapter->txtag);
1620                 adapter->txtag = NULL;
1621         }
1622         return;
1623 }
1624
1625 /*********************************************************************
1626  *
1627  *  The offload context needs to be set when we transfer the first
1628  *  packet of a particular protocol (TCP/UDP). We change the
1629  *  context only if the protocol type changes.
1630  *
1631  **********************************************************************/
1632 static void
1633 ixgb_transmit_checksum_setup(struct adapter * adapter,
1634                              struct mbuf * mp,
1635                              u_int8_t * txd_popts)
1636 {
1637         struct ixgb_context_desc *TXD;
1638         struct ixgb_buffer *tx_buffer;
1639         int             curr_txd;
1640
1641         if (mp->m_pkthdr.csum_flags) {
1642
1643                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1644                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1645                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1646                                 return;
1647                         else
1648                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1649                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1650                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1651                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1652                                 return;
1653                         else
1654                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1655                 } else {
1656                         *txd_popts = 0;
1657                         return;
1658                 }
1659         } else {
1660                 *txd_popts = 0;
1661                 return;
1662         }
1663
1664         /*
1665          * If we reach this point, the checksum offload context needs to be
1666          * reset.
1667          */
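        /*
         * The context descriptor occupies a normal slot in the transmit
         * ring, so a tx_buffer entry (with no mbuf attached) is consumed and
         * the available-descriptor count is decremented below, just as for a
         * data descriptor.
         */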
1668         curr_txd = adapter->next_avail_tx_desc;
1669         tx_buffer = &adapter->tx_buffer_area[curr_txd];
1670         TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
1671
1672
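        /*
         * tucss is the offset at which checksumming starts (just past the
         * Ethernet and IP headers), tucso is the offset of the checksum
         * field to be filled in, and tucse = 0 requests checksumming through
         * the end of the packet.
         */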
1673         TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1674         TXD->tucse = 0;
1675
1676         TXD->mss = 0;
1677
1678         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1679                 TXD->tucso =
1680                         ENET_HEADER_SIZE + sizeof(struct ip) +
1681                         offsetof(struct tcphdr, th_sum);
1682         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1683                 TXD->tucso =
1684                         ENET_HEADER_SIZE + sizeof(struct ip) +
1685                         offsetof(struct udphdr, uh_sum);
1686         }
1687         TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
1688
1689         tx_buffer->m_head = NULL;
1690
1691         if (++curr_txd == adapter->num_tx_desc)
1692                 curr_txd = 0;
1693
1694         adapter->num_tx_desc_avail--;
1695         adapter->next_avail_tx_desc = curr_txd;
1696         return;
1697 }
1698
1699 /**********************************************************************
1700  *
1701  *  Examine each tx_buffer in the used queue. If the hardware is done
1702  *  processing the packet, free the associated resources and return the
1703  *  tx_buffer to the free queue.
1704  *
1705  **********************************************************************/
1706 static void
1707 ixgb_clean_transmit_interrupts(struct adapter * adapter)
1708 {
1709         int             i, num_avail;
1710         struct ixgb_buffer *tx_buffer;
1711         struct ixgb_tx_desc *tx_desc;
1712
1713         IXGB_LOCK_ASSERT(adapter);
1714
1715         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1716                 return;
1717
1718 #ifdef _SV_
1719         adapter->clean_tx_interrupts++;
1720 #endif
1721         num_avail = adapter->num_tx_desc_avail;
1722         i = adapter->oldest_used_tx_desc;
1723
1724         tx_buffer = &adapter->tx_buffer_area[i];
1725         tx_desc = &adapter->tx_desc_base[i];
1726
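        /*
         * The hardware sets the DD status bit once it has finished with a
         * descriptor (requested via the RS command bit), so walk forward
         * from the oldest in-use descriptor, unmapping and freeing mbufs,
         * until a descriptor that is still pending is reached.
         */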
1727         while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1728
1729                 tx_desc->status = 0;
1730                 num_avail++;
1731
1732                 if (tx_buffer->m_head) {
1733                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1734                                         BUS_DMASYNC_POSTWRITE);
1735                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1736                         bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1737                         m_freem(tx_buffer->m_head);
1738                         tx_buffer->m_head = NULL;
1739                 }
1740                 if (++i == adapter->num_tx_desc)
1741                         i = 0;
1742
1743                 tx_buffer = &adapter->tx_buffer_area[i];
1744                 tx_desc = &adapter->tx_desc_base[i];
1745         }
1746
1747         adapter->oldest_used_tx_desc = i;
1748
1749         /*
1750          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1751          * it is OK to send packets. If there are no pending descriptors,
1752          * clear the timeout. Otherwise, if some descriptors have been freed,
1753          * restart the timeout.
1754          */
1755         if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1756                 struct ifnet   *ifp = adapter->ifp;
1757
1758                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1759                 if (num_avail == adapter->num_tx_desc)
1760                         adapter->tx_timer = 0;
1761                 else if (num_avail == adapter->num_tx_desc_avail)
1762                         adapter->tx_timer = IXGB_TX_TIMEOUT;
1763         }
1764         adapter->num_tx_desc_avail = num_avail;
1765         return;
1766 }
1767
1768
1769 /*********************************************************************
1770  *
1771  *  Get a buffer from the system mbuf cluster pool.
1772  *
1773  **********************************************************************/
1774 static int
1775 ixgb_get_buf(int i, struct adapter * adapter,
1776              struct mbuf * nmp)
1777 {
1778         register struct mbuf *mp = nmp;
1779         struct ixgb_buffer *rx_buffer;
1780         struct ifnet   *ifp;
1781         bus_addr_t      paddr;
1782         int             error;
1783
1784         ifp = adapter->ifp;
1785
1786         if (mp == NULL) {
1787
1788                 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1789
1790                 if (mp == NULL) {
1791                         adapter->mbuf_alloc_failed++;
1792                         return (ENOBUFS);
1793                 }
1794                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1795         } else {
1796                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1797                 mp->m_data = mp->m_ext.ext_buf;
1798                 mp->m_next = NULL;
1799         }
1800
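        /*
         * Trimming ETHER_ALIGN (2) bytes from the front of the cluster
         * aligns the IP header that follows the 14-byte Ethernet header on
         * a 32-bit boundary; this is only done for standard-sized frames.
         */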
1801         if (ifp->if_mtu <= ETHERMTU) {
1802                 m_adj(mp, ETHER_ALIGN);
1803         }
1804         rx_buffer = &adapter->rx_buffer_area[i];
1805
1806         /*
1807          * Using memory from the mbuf cluster pool, invoke the bus_dma
1808          * machinery to arrange the memory mapping.
1809          */
1810         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1811                                 mtod(mp, void *), mp->m_len,
1812                                 ixgb_dmamap_cb, &paddr, 0);
1813         if (error) {
1814                 m_free(mp);
1815                 return (error);
1816         }
1817         rx_buffer->m_head = mp;
1818         adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1819         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1820
1821         return (0);
1822 }
1823
1824 /*********************************************************************
1825  *
1826  *  Allocate memory for rx_buffer structures. Since we use one
1827  *  rx_buffer per received packet, the maximum number of rx_buffers
1828  *  that we'll need is equal to the number of receive descriptors
1829  *  that we've allocated.
1830  *
1831  **********************************************************************/
1832 static int
1833 ixgb_allocate_receive_structures(struct adapter * adapter)
1834 {
1835         int             i, error;
1836         struct ixgb_buffer *rx_buffer;
1837
1838         if (!(adapter->rx_buffer_area =
1839               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1840                                             adapter->num_rx_desc, M_DEVBUF,
1841                                             M_NOWAIT | M_ZERO))) {
1842                 device_printf(adapter->dev,
1843                     "Unable to allocate rx_buffer memory\n");
1844                 return (ENOMEM);
1845         }
1846         bzero(adapter->rx_buffer_area,
1847               sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1848
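        /*
         * One DMA tag covers all receive buffers (a single segment of at
         * most MCLBYTES per buffer); an individual map is then created for
         * each descriptor so clusters can be loaded and unloaded
         * independently.
         */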
1849         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1850                                    PAGE_SIZE, 0,        /* alignment, bounds */
1851                                    BUS_SPACE_MAXADDR,   /* lowaddr */
1852                                    BUS_SPACE_MAXADDR,   /* highaddr */
1853                                    NULL, NULL,  /* filter, filterarg */
1854                                    MCLBYTES,    /* maxsize */
1855                                    1,   /* nsegments */
1856                                    MCLBYTES,    /* maxsegsize */
1857                                    BUS_DMA_ALLOCNOW,    /* flags */
1858 #if __FreeBSD_version >= 502000
1859                                    NULL,        /* lockfunc */
1860                                    NULL,        /* lockfuncarg */
1861 #endif
1862                                    &adapter->rxtag);
1863         if (error != 0) {
1864                 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1865                        "bus_dma_tag_create failed; error %u\n",
1866                        error);
1867                 goto fail_0;
1868         }
1869         rx_buffer = adapter->rx_buffer_area;
1870         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1871                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1872                                           &rx_buffer->map);
1873                 if (error != 0) {
1874                         device_printf(adapter->dev,
1875                                "ixgb_allocate_receive_structures: "
1876                                "bus_dmamap_create failed; error %u\n",
1877                                error);
1878                         goto fail_1;
1879                 }
1880         }
1881
1882         for (i = 0; i < adapter->num_rx_desc; i++) {
1883                 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1884                         adapter->rx_buffer_area[i].m_head = NULL;
1885                         adapter->rx_desc_base[i].buff_addr = 0;
1886                         return (ENOBUFS);
1887                 }
1888         }
1889
1890         return (0);
1891 fail_1:
1892         bus_dma_tag_destroy(adapter->rxtag);
1893 fail_0:
1894         adapter->rxtag = NULL;
1895         free(adapter->rx_buffer_area, M_DEVBUF);
1896         adapter->rx_buffer_area = NULL;
1897         return (error);
1898 }
1899
1900 /*********************************************************************
1901  *
1902  *  Allocate and initialize receive structures.
1903  *
1904  **********************************************************************/
1905 static int
1906 ixgb_setup_receive_structures(struct adapter * adapter)
1907 {
1908         bzero((void *)adapter->rx_desc_base,
1909               (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1910
1911         if (ixgb_allocate_receive_structures(adapter))
1912                 return ENOMEM;
1913
1914         /* Setup our descriptor pointers */
1915         adapter->next_rx_desc_to_check = 0;
1916         adapter->next_rx_desc_to_use = 0;
1917         return (0);
1918 }
1919
1920 /*********************************************************************
1921  *
1922  *  Enable receive unit.
1923  *
1924  **********************************************************************/
1925 static void
1926 ixgb_initialize_receive_unit(struct adapter * adapter)
1927 {
1928         u_int32_t       reg_rctl;
1929         u_int32_t       reg_rxcsum;
1930         u_int32_t       reg_rxdctl;
1931         struct ifnet   *ifp;
1932         u_int64_t       rdba = adapter->rxdma.dma_paddr;
1933
1934         ifp = adapter->ifp;
1935
1936         /*
1937          * Make sure receives are disabled while setting up the descriptor
1938          * ring
1939          */
1940         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1941         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1942
1943         /* Set the Receive Delay Timer Register */
1944         IXGB_WRITE_REG(&adapter->hw, RDTR,
1945                        adapter->rx_int_delay);
1946
1947
1948         /* Setup the Base and Length of the Rx Descriptor Ring */
1949         IXGB_WRITE_REG(&adapter->hw, RDBAL,
1950                        (rdba & 0x00000000ffffffffULL));
1951         IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1952         IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1953                        sizeof(struct ixgb_rx_desc));
1954
1955         /* Setup the HW Rx Head and Tail Descriptor Pointers */
1956         IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1957
1958         IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
1959
1960
1961
1962         reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1963                 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1964                 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1965         IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
1966
1967
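        /*
         * Program RAIDC, which appears to moderate receive interrupts: the
         * poll threshold works out to roughly 1/16 of the ring size (capped
         * at 0x3F), and the configured rx_int_delay is folded into the delay
         * field.
         */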
1968         adapter->raidc = 1;
1969         if (adapter->raidc) {
1970                 uint32_t        raidc;
1971                 uint8_t         poll_threshold;
1972 #define IXGB_RAIDC_POLL_DEFAULT 120
1973
1974                 poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1975                 poll_threshold >>= 1;
1976                 poll_threshold &= 0x3F;
1977                 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1978                         (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1979                         (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1980                         poll_threshold;
1981                 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1982         }
1983         /* Enable receive checksum offload for TCP and UDP, if requested */
1984         if (ifp->if_capenable & IFCAP_RXCSUM) {
1985                 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1986                 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1987                 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1988         }
1989         /* Setup the Receive Control Register */
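        /*
         * Accept broadcasts (BAM), strip the Ethernet CRC (SECRC), set the
         * receive descriptor minimum threshold to half the ring (RDMTS_1_2),
         * and preserve the configured multicast filter offset (MO field).
         */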
1990         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1991         reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1992         reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1993                 IXGB_RCTL_CFF |
1994                 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
1995
1996         switch (adapter->rx_buffer_len) {
1997         default:
1998         case IXGB_RXBUFFER_2048:
1999                 reg_rctl |= IXGB_RCTL_BSIZE_2048;
2000                 break;
2001         case IXGB_RXBUFFER_4096:
2002                 reg_rctl |= IXGB_RCTL_BSIZE_4096;
2003                 break;
2004         case IXGB_RXBUFFER_8192:
2005                 reg_rctl |= IXGB_RCTL_BSIZE_8192;
2006                 break;
2007         case IXGB_RXBUFFER_16384:
2008                 reg_rctl |= IXGB_RCTL_BSIZE_16384;
2009                 break;
2010         }
2011
2012         reg_rctl |= IXGB_RCTL_RXEN;
2013
2014
2015         /* Enable Receives */
2016         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2017
2018         return;
2019 }
2020
2021 /*********************************************************************
2022  *
2023  *  Free receive related data structures.
2024  *
2025  **********************************************************************/
2026 static void
2027 ixgb_free_receive_structures(struct adapter * adapter)
2028 {
2029         struct ixgb_buffer *rx_buffer;
2030         int             i;
2031
2032         INIT_DEBUGOUT("free_receive_structures: begin");
2033
2034         if (adapter->rx_buffer_area != NULL) {
2035                 rx_buffer = adapter->rx_buffer_area;
2036                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2037                         if (rx_buffer->map != NULL) {
2038                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2039                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2040                         }
2041                         if (rx_buffer->m_head != NULL)
2042                                 m_freem(rx_buffer->m_head);
2043                         rx_buffer->m_head = NULL;
2044                 }
2045         }
2046         if (adapter->rx_buffer_area != NULL) {
2047                 free(adapter->rx_buffer_area, M_DEVBUF);
2048                 adapter->rx_buffer_area = NULL;
2049         }
2050         if (adapter->rxtag != NULL) {
2051                 bus_dma_tag_destroy(adapter->rxtag);
2052                 adapter->rxtag = NULL;
2053         }
2054         return;
2055 }
2056
2057 /*********************************************************************
2058  *
2059  *  This routine executes in interrupt context. It replenishes
2060  *  the mbufs in the descriptor ring and passes data which has been
2061  *  DMA'ed into host memory up to the upper layer.
2062  *
2063  *  We loop at most count times if count is > 0, or until done if
2064  *  count < 0.
2065  *
2066  *********************************************************************/
2067 static int
2068 ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2069 {
2070         struct ifnet   *ifp;
2071         struct mbuf    *mp;
2072 #if __FreeBSD_version < 500000
2073         struct ether_header *eh;
2074 #endif
2075         int             eop = 0;
2076         int             len;
2077         u_int8_t        accept_frame = 0;
2078         int             i;
2079         int             next_to_use = 0;
2080         int             eop_desc;
2081         int             rx_npkts = 0;
2082         /* Pointer to the receive descriptor being examined. */
2083         struct ixgb_rx_desc *current_desc;
2084
2085         IXGB_LOCK_ASSERT(adapter);
2086
2087         ifp = adapter->ifp;
2088         i = adapter->next_rx_desc_to_check;
2089         next_to_use = adapter->next_rx_desc_to_use;
2090         eop_desc = adapter->next_rx_desc_to_check;
2091         current_desc = &adapter->rx_desc_base[i];
2092
2093         if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2094 #ifdef _SV_
2095                 adapter->no_pkts_avail++;
2096 #endif
2097                 return (rx_npkts);
2098         }
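        /*
         * Walk the descriptors whose DD status bit is set: fragments are
         * chained onto fmp/lmp until a descriptor with EOP is seen, at which
         * point the completed packet is handed to the stack with the adapter
         * lock temporarily dropped.
         */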
2099         while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2100
2101                 mp = adapter->rx_buffer_area[i].m_head;
2102                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2103                                 BUS_DMASYNC_POSTREAD);
2104                 accept_frame = 1;
2105                 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2106                         count--;
2107                         eop = 1;
2108                 } else {
2109                         eop = 0;
2110                 }
2111                 len = current_desc->length;
2112
2113                 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2114                             IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2115                                             IXGB_RX_DESC_ERRORS_RXE)) {
2116                         accept_frame = 0;
2117                 }
2118                 if (accept_frame) {
2119
2120                         /* Assign correct length to the current fragment */
2121                         mp->m_len = len;
2122
2123                         if (adapter->fmp == NULL) {
2124                                 mp->m_pkthdr.len = len;
2125                                 adapter->fmp = mp;      /* Store the first mbuf */
2126                                 adapter->lmp = mp;
2127                         } else {
2128                                 /* Chain mbufs together */
2129                                 mp->m_flags &= ~M_PKTHDR;
2130                                 adapter->lmp->m_next = mp;
2131                                 adapter->lmp = adapter->lmp->m_next;
2132                                 adapter->fmp->m_pkthdr.len += len;
2133                         }
2134
2135                         if (eop) {
2136                                 eop_desc = i;
2137                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2138
2139 #if __FreeBSD_version < 500000
2140                                 eh = mtod(adapter->fmp, struct ether_header *);
2141
2142                                 /* Remove ethernet header from mbuf */
2143                                 m_adj(adapter->fmp, sizeof(struct ether_header));
2144                                 ixgb_receive_checksum(adapter, current_desc,
2145                                                       adapter->fmp);
2146
2147                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2148                                         VLAN_INPUT_TAG(eh, adapter->fmp,
2149                                                      current_desc->special);
2150                                 else
2151                                         ether_input(ifp, eh, adapter->fmp);
2152 #else
2153                                 ixgb_receive_checksum(adapter, current_desc,
2154                                                       adapter->fmp);
2155 #if __FreeBSD_version < 700000
2156                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2157                                         VLAN_INPUT_TAG(ifp, adapter->fmp,
2158                                                        current_desc->special);
2159 #else
2160                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2161                                         adapter->fmp->m_pkthdr.ether_vtag =
2162                                             current_desc->special;
2163                                         adapter->fmp->m_flags |= M_VLANTAG;
2164                                 }
2165 #endif
2166
2167                                 if (adapter->fmp != NULL) {
2168                                         IXGB_UNLOCK(adapter);
2169                                         (*ifp->if_input) (ifp, adapter->fmp);
2170                                         IXGB_LOCK(adapter);
2171                                         rx_npkts++;
2172                                 }
2173 #endif
2174                                 adapter->fmp = NULL;
2175                                 adapter->lmp = NULL;
2176                         }
2177                         adapter->rx_buffer_area[i].m_head = NULL;
2178                 } else {
2179                         adapter->dropped_pkts++;
2180                         if (adapter->fmp != NULL)
2181                                 m_freem(adapter->fmp);
2182                         adapter->fmp = NULL;
2183                         adapter->lmp = NULL;
2184                 }
2185
2186                 /* Zero out the receive descriptor's status */
2187                 current_desc->status = 0;
2188
2189                 /* Advance our pointers to the next descriptor */
2190                 if (++i == adapter->num_rx_desc) {
2191                         i = 0;
2192                         current_desc = adapter->rx_desc_base;
2193                 } else
2194                         current_desc++;
2195         }
2196         adapter->next_rx_desc_to_check = i;
2197
2198         if (--i < 0)
2199                 i = (adapter->num_rx_desc - 1);
2200
2201         /*
2202          * 82597EX: Workaround for a redundant write-back in the receive descriptor
2203          * ring (causes memory corruption). Avoid using and re-submitting the most
2204          * recently received RX descriptor back to hardware.
2205          *
2206          * if (last written-back descriptor == EOP-bit-set descriptor)
2207          *      then avoid re-submitting the most recently received RX descriptor
2208          *      back to hardware.
2209          * if (last written-back descriptor != EOP-bit-set descriptor)
2210          *      then avoid re-submitting the most recently received RX descriptors
2211          *      up to the last descriptor with the EOP bit set.
2212          */
2213         if (eop_desc != i) {
2214                 if (++eop_desc == adapter->num_rx_desc)
2215                         eop_desc = 0;
2216                 i = eop_desc;
2217         }
2218         /* Replenish the descriptors with new mbufs up to the last descriptor with EOP set */
2219         while (next_to_use != i) {
2220                 current_desc = &adapter->rx_desc_base[next_to_use];
2221                 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2222                             IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2223                                              IXGB_RX_DESC_ERRORS_RXE))) {
2224                         mp = adapter->rx_buffer_area[next_to_use].m_head;
2225                         ixgb_get_buf(next_to_use, adapter, mp);
2226                 } else {
2227                         if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2228                                 break;
2229                 }
2230                 /* Advance our pointers to the next descriptor */
2231                 if (++next_to_use == adapter->num_rx_desc) {
2232                         next_to_use = 0;
2233                         current_desc = adapter->rx_desc_base;
2234                 } else
2235                         current_desc++;
2236         }
2237         adapter->next_rx_desc_to_use = next_to_use;
2238         if (--next_to_use < 0)
2239                 next_to_use = (adapter->num_rx_desc - 1);
2240         /* Advance the IXGB's Receive Queue #0  "Tail Pointer" */
2241         IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2242
2243         return (rx_npkts);
2244 }
2245
2246 /*********************************************************************
2247  *
2248  *  Verify that the hardware indicated that the checksum is valid.
2249  *  Inform the stack of the checksum status so that the stack
2250  *  does not spend time verifying the checksum itself.
2251  *
2252  *********************************************************************/
2253 static void
2254 ixgb_receive_checksum(struct adapter * adapter,
2255                       struct ixgb_rx_desc * rx_desc,
2256                       struct mbuf * mp)
2257 {
2258         if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2259                 mp->m_pkthdr.csum_flags = 0;
2260                 return;
2261         }
2262         if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2263                 /* Did it pass? */
2264                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2265                         /* IP Checksum Good */
2266                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2267                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2268
2269                 } else {
2270                         mp->m_pkthdr.csum_flags = 0;
2271                 }
2272         }
2273         if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
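        /*
         * For a good TCP/UDP checksum, CSUM_DATA_VALID | CSUM_PSEUDO_HDR
         * together with csum_data = 0xffff tells the stack that the payload
         * checksum has already been verified, so it can skip its own
         * computation.
         */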
2274                 /* Did it pass? */
2275                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2276                         mp->m_pkthdr.csum_flags |=
2277                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2278                         mp->m_pkthdr.csum_data = htons(0xffff);
2279                 }
2280         }
2281         return;
2282 }
2283
2284
2285 static void
2286 ixgb_enable_vlans(struct adapter * adapter)
2287 {
2288         uint32_t        ctrl;
2289
2290         ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2291         ctrl |= IXGB_CTRL0_VME;
2292         IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2293
2294         return;
2295 }
2296
2297
2298 static void
2299 ixgb_enable_intr(struct adapter * adapter)
2300 {
2301         IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2302                             IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2303         return;
2304 }
2305
2306 static void
2307 ixgb_disable_intr(struct adapter * adapter)
2308 {
2309         IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2310         return;
2311 }
2312
2313 void
2314 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2315                    uint32_t reg,
2316                    uint16_t * value)
2317 {
2318         pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2319                          *value, 2);
2320 }
2321
2322 /**********************************************************************
2323  *
2324  *  Update the board statistics counters.
2325  *
2326  **********************************************************************/
2327 static void
2328 ixgb_update_stats_counters(struct adapter * adapter)
2329 {
2330         struct ifnet   *ifp;
2331
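        /*
         * The hardware statistics registers are presumably clear-on-read, so
         * each value read here is accumulated into the driver's soft copy
         * rather than stored directly.
         */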
2332         adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2333         adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2334         adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2335         adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2336         adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2337         adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2338         adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2339         adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2340         adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2341         adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2342
2343         adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2344         adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2345         adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2346         adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2347         adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2348         adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2349         adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2350         adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2351         adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2352         adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2353         adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2354         adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2355         adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2356         adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2357         adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2358         adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2359         adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2360         adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2361         adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2362         adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2363         adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2364         adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2365         adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2366         adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2367         adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2368         adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2369         adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2370
2371         adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2372         adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2373         adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2374         adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2375         adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2376         adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2377         adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2378         adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2379         adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2380         adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2381         adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2382         adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2383         adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2384         adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2385         adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2386         adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2387         adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2388         adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2389         adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2390         adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2391         adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2392         adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2393
2394         ifp = adapter->ifp;
2395
2396         /* Fill out the OS statistics structure */
2397         ifp->if_ipackets = adapter->stats.gprcl;
2398         ifp->if_opackets = adapter->stats.gptcl;
2399         ifp->if_ibytes = adapter->stats.gorcl;
2400         ifp->if_obytes = adapter->stats.gotcl;
2401         ifp->if_imcasts = adapter->stats.mprcl;
2402         ifp->if_collisions = 0;
2403
2404         /* Rx Errors */
2405         ifp->if_ierrors =
2406                 adapter->dropped_pkts +
2407                 adapter->stats.crcerrs +
2408                 adapter->stats.rnbc +
2409                 adapter->stats.mpc +
2410                 adapter->stats.rlec;
2411
2412
2413 }
2414
2415
2416 /**********************************************************************
2417  *
2418  *  This routine is called only when ixgb_display_debug_stats is enabled.
2419  *  It provides a way to examine important statistics
2420  *  maintained by the driver and hardware.
2421  *
2422  **********************************************************************/
2423 static void
2424 ixgb_print_hw_stats(struct adapter * adapter)
2425 {
2426         char            buf_speed[100], buf_type[100];
2427         ixgb_bus_speed  bus_speed;
2428         ixgb_bus_type   bus_type;
2429         device_t dev;
2430
2431         dev = adapter->dev;
2432 #ifdef _SV_
2433         device_printf(dev, "Packets not Avail = %ld\n",
2434                adapter->no_pkts_avail);
2435         device_printf(dev, "CleanTxInterrupts = %ld\n",
2436                adapter->clean_tx_interrupts);
2437         device_printf(dev, "ICR RXDMT0 = %lld\n",
2438                (long long)adapter->sv_stats.icr_rxdmt0);
2439         device_printf(dev, "ICR RXO = %lld\n",
2440                (long long)adapter->sv_stats.icr_rxo);
2441         device_printf(dev, "ICR RXT0 = %lld\n",
2442                (long long)adapter->sv_stats.icr_rxt0);
2443         device_printf(dev, "ICR TXDW = %lld\n",
2444                (long long)adapter->sv_stats.icr_TXDW);
2445 #endif                          /* _SV_ */
2446
2447         bus_speed = adapter->hw.bus.speed;
2448         bus_type = adapter->hw.bus.type;
2449         snprintf(buf_speed, sizeof(buf_speed), "%s",
2450                 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2451                 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2452                 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2453                 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2454                 "UNKNOWN");
2455         device_printf(dev, "PCI_Bus_Speed = %s\n",
2456                buf_speed);
2457
2458         snprintf(buf_type, sizeof(buf_type), "%s",
2459                 bus_type == ixgb_bus_type_pci ? "PCI" :
2460                 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2461                 "UNKNOWN");
2462         device_printf(dev, "PCI_Bus_Type = %s\n",
2463                buf_type);
2464
2465         device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2466                adapter->no_tx_desc_avail1);
2467         device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2468                adapter->no_tx_desc_avail2);
2469         device_printf(dev, "Std Mbuf Failed = %ld\n",
2470                adapter->mbuf_alloc_failed);
2471         device_printf(dev, "Std Cluster Failed = %ld\n",
2472                adapter->mbuf_cluster_failed);
2473
2474         device_printf(dev, "Defer count = %lld\n",
2475                (long long)adapter->stats.dc);
2476         device_printf(dev, "Missed Packets = %lld\n",
2477                (long long)adapter->stats.mpc);
2478         device_printf(dev, "Receive No Buffers = %lld\n",
2479                (long long)adapter->stats.rnbc);
2480         device_printf(dev, "Receive length errors = %lld\n",
2481                (long long)adapter->stats.rlec);
2482         device_printf(dev, "Crc errors = %lld\n",
2483                (long long)adapter->stats.crcerrs);
2484         device_printf(dev, "Driver dropped packets = %ld\n",
2485                adapter->dropped_pkts);
2486
2487         device_printf(dev, "XON Rcvd = %lld\n",
2488                (long long)adapter->stats.xonrxc);
2489         device_printf(dev, "XON Xmtd = %lld\n",
2490                (long long)adapter->stats.xontxc);
2491         device_printf(dev, "XOFF Rcvd = %lld\n",
2492                (long long)adapter->stats.xoffrxc);
2493         device_printf(dev, "XOFF Xmtd = %lld\n",
2494                (long long)adapter->stats.xofftxc);
2495
2496         device_printf(dev, "Good Packets Rcvd = %lld\n",
2497                (long long)adapter->stats.gprcl);
2498         device_printf(dev, "Good Packets Xmtd = %lld\n",
2499                (long long)adapter->stats.gptcl);
2500
2501         device_printf(dev, "Jumbo frames recvd = %lld\n",
2502                (long long)adapter->stats.jprcl);
2503         device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2504                (long long)adapter->stats.jptcl);
2505
2506         return;
2507
2508 }
2509
2510 static int
2511 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2512 {
2513         int             error;
2514         int             result;
2515         struct adapter *adapter;
2516
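        /*
         * Reading this sysctl just returns -1; writing the value 1 dumps the
         * hardware statistics to the console via ixgb_print_hw_stats().
         */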
2517         result = -1;
2518         error = sysctl_handle_int(oidp, &result, 0, req);
2519
2520         if (error || !req->newptr)
2521                 return (error);
2522
2523         if (result == 1) {
2524                 adapter = (struct adapter *) arg1;
2525                 ixgb_print_hw_stats(adapter);
2526         }
2527         return (error);
2528 }