1 /*******************************************************************************
2
3 Copyright (c) 2001-2004, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /*$FreeBSD$*/
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <dev/ixgb/if_ixgb.h>
41
42 /*********************************************************************
43  *  Set this to one to display debug statistics
44  *********************************************************************/
45 int             ixgb_display_debug_stats = 0;
46
47 /*********************************************************************
48  *  Linked list of board private structures for all NICs found
49  *********************************************************************/
50
51 struct adapter *ixgb_adapter_list = NULL;
52
53
54
55 /*********************************************************************
56  *  Driver version
57  *********************************************************************/
58
59 char            ixgb_driver_version[] = "1.0.6";
60 char            ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
61
62 /*********************************************************************
63  *  PCI Device ID Table
64  *
65  *  Used by probe to select devices to load on
66  *  Last field stores an index into ixgb_strings
67  *  Last entry must be all 0s
68  *
69  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70  *********************************************************************/
71
72 static ixgb_vendor_info_t ixgb_vendor_info_array[] =
73 {
74         /* Intel(R) PRO/10000 Network Connection */
75         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77         /* required last entry */
78         {0, 0, 0, 0, 0}
79 };
80
81 /*********************************************************************
82  *  Table of branding strings for all supported NICs.
83  *********************************************************************/
84
85 static char    *ixgb_strings[] = {
86         "Intel(R) PRO/10GbE Network Driver"
87 };
88
89 /*********************************************************************
90  *  Function prototypes
91  *********************************************************************/
92 static int      ixgb_probe(device_t);
93 static int      ixgb_attach(device_t);
94 static int      ixgb_detach(device_t);
95 static int      ixgb_shutdown(device_t);
96 static void     ixgb_intr(void *);
97 static void     ixgb_start(struct ifnet *);
98 static void     ixgb_start_locked(struct ifnet *);
99 static int      ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static void     ixgb_watchdog(struct adapter *);
101 static void     ixgb_init(void *);
102 static void     ixgb_init_locked(struct adapter *);
103 static void     ixgb_stop(void *);
104 static void     ixgb_media_status(struct ifnet *, struct ifmediareq *);
105 static int      ixgb_media_change(struct ifnet *);
106 static void     ixgb_identify_hardware(struct adapter *);
107 static int      ixgb_allocate_pci_resources(struct adapter *);
108 static void     ixgb_free_pci_resources(struct adapter *);
109 static void     ixgb_local_timer(void *);
110 static int      ixgb_hardware_init(struct adapter *);
111 static int      ixgb_setup_interface(device_t, struct adapter *);
112 static int      ixgb_setup_transmit_structures(struct adapter *);
113 static void     ixgb_initialize_transmit_unit(struct adapter *);
114 static int      ixgb_setup_receive_structures(struct adapter *);
115 static void     ixgb_initialize_receive_unit(struct adapter *);
116 static void     ixgb_enable_intr(struct adapter *);
117 static void     ixgb_disable_intr(struct adapter *);
118 static void     ixgb_free_transmit_structures(struct adapter *);
119 static void     ixgb_free_receive_structures(struct adapter *);
120 static void     ixgb_update_stats_counters(struct adapter *);
121 static void     ixgb_clean_transmit_interrupts(struct adapter *);
122 static int      ixgb_allocate_receive_structures(struct adapter *);
123 static int      ixgb_allocate_transmit_structures(struct adapter *);
124 static int      ixgb_process_receive_interrupts(struct adapter *, int);
125 static void 
126 ixgb_receive_checksum(struct adapter *,
127                       struct ixgb_rx_desc * rx_desc,
128                       struct mbuf *);
129 static void 
130 ixgb_transmit_checksum_setup(struct adapter *,
131                              struct mbuf *,
132                              u_int8_t *);
133 static void     ixgb_set_promisc(struct adapter *);
134 static void     ixgb_disable_promisc(struct adapter *);
135 static void     ixgb_set_multi(struct adapter *);
136 static void     ixgb_print_hw_stats(struct adapter *);
137 static void     ixgb_print_link_status(struct adapter *);
138 static int 
139 ixgb_get_buf(int i, struct adapter *,
140              struct mbuf *);
141 static void     ixgb_enable_vlans(struct adapter * adapter);
142 static int      ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143 static int      ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
144 static int 
145 ixgb_dma_malloc(struct adapter *, bus_size_t,
146                 struct ixgb_dma_alloc *, int);
147 static void     ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148 #ifdef DEVICE_POLLING
149 static poll_handler_t ixgb_poll;
150 #endif
151
152 /*********************************************************************
153  *  FreeBSD Device Interface Entry Points
154  *********************************************************************/
155
156 static device_method_t ixgb_methods[] = {
157         /* Device interface */
158         DEVMETHOD(device_probe, ixgb_probe),
159         DEVMETHOD(device_attach, ixgb_attach),
160         DEVMETHOD(device_detach, ixgb_detach),
161         DEVMETHOD(device_shutdown, ixgb_shutdown),
162         {0, 0}
163 };
164
165 static driver_t ixgb_driver = {
166         "ixgb", ixgb_methods, sizeof(struct adapter),
167 };
168
169 static devclass_t ixgb_devclass;
170 DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
171
172 MODULE_DEPEND(ixgb, pci, 1, 1, 1);
173 MODULE_DEPEND(ixgb, ether, 1, 1, 1);
174
175 /* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128      /* chip considers prefetch below this */
177 #define RXDCTL_HTHRESH_DEFAULT 16       /* chip will only prefetch if tail is
178                                          * pushed this many descriptors from
179                                          * head */
180 #define RXDCTL_WTHRESH_DEFAULT 0        /* chip writes back at this many or RXT0 */
181
182
183 /*********************************************************************
184  *  Device identification routine
185  *
 *  ixgb_probe determines whether the driver should be loaded on the
 *  adapter, based on the adapter's PCI vendor/device ID.
188  *
189  *  return 0 on success, positive on failure
190  *********************************************************************/
191
192 static int
193 ixgb_probe(device_t dev)
194 {
195         ixgb_vendor_info_t *ent;
196
197         u_int16_t       pci_vendor_id = 0;
198         u_int16_t       pci_device_id = 0;
199         u_int16_t       pci_subvendor_id = 0;
200         u_int16_t       pci_subdevice_id = 0;
201         char            adapter_name[60];
202
203         INIT_DEBUGOUT("ixgb_probe: begin");
204
205         pci_vendor_id = pci_get_vendor(dev);
206         if (pci_vendor_id != IXGB_VENDOR_ID)
207                 return (ENXIO);
208
209         pci_device_id = pci_get_device(dev);
210         pci_subvendor_id = pci_get_subvendor(dev);
211         pci_subdevice_id = pci_get_subdevice(dev);
212
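        /*
         * Scan the supported-device table; PCI_ANY_ID subvendor/subdevice
         * entries act as wildcards.
         */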
213         ent = ixgb_vendor_info_array;
214         while (ent->vendor_id != 0) {
215                 if ((pci_vendor_id == ent->vendor_id) &&
216                     (pci_device_id == ent->device_id) &&
217
218                     ((pci_subvendor_id == ent->subvendor_id) ||
219                      (ent->subvendor_id == PCI_ANY_ID)) &&
220
221                     ((pci_subdevice_id == ent->subdevice_id) ||
222                      (ent->subdevice_id == PCI_ANY_ID))) {
223                         sprintf(adapter_name, "%s, Version - %s",
224                                 ixgb_strings[ent->index],
225                                 ixgb_driver_version);
226                         device_set_desc_copy(dev, adapter_name);
227                         return (BUS_PROBE_DEFAULT);
228                 }
229                 ent++;
230         }
231
232         return (ENXIO);
233 }
234
235 /*********************************************************************
236  *  Device initialization routine
237  *
238  *  The attach entry point is called when the driver is being loaded.
239  *  This routine identifies the type of hardware, allocates all resources
240  *  and initializes the hardware.
241  *
242  *  return 0 on success, positive on failure
243  *********************************************************************/
244
245 static int
246 ixgb_attach(device_t dev)
247 {
248         struct adapter *adapter;
249         int             tsize, rsize;
250         int             error = 0;
251
252         device_printf(dev, "%s\n", ixgb_copyright);
253         INIT_DEBUGOUT("ixgb_attach: begin");
254
255         /* Allocate, clear, and link in our adapter structure */
256         if (!(adapter = device_get_softc(dev))) {
257                 device_printf(dev, "adapter structure allocation failed\n");
258                 return (ENOMEM);
259         }
260         bzero(adapter, sizeof(struct adapter));
261         adapter->dev = dev;
262         adapter->osdep.dev = dev;
263         IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
264
265         if (ixgb_adapter_list != NULL)
266                 ixgb_adapter_list->prev = adapter;
267         adapter->next = ixgb_adapter_list;
268         ixgb_adapter_list = adapter;
269
270         /* SYSCTL APIs */
271         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
272                         SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
273                         OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
274                         (void *)adapter, 0,
275                         ixgb_sysctl_stats, "I", "Statistics");
276
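        /*
         * The periodic timer (link check, statistics, TX watchdog) runs
         * under the adapter mutex.
         */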
277         callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
278
279         /* Determine hardware revision */
280         ixgb_identify_hardware(adapter);
281
282         /* Parameters (to be read from user) */
283         adapter->num_tx_desc = IXGB_MAX_TXD;
284         adapter->num_rx_desc = IXGB_MAX_RXD;
285         adapter->tx_int_delay = TIDV;
286         adapter->rx_int_delay = RDTR;
287         adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
288
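        /* Default link flow-control parameters: water marks, pause time, XON. */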
289         adapter->hw.fc.high_water = FCRTH;
290         adapter->hw.fc.low_water = FCRTL;
291         adapter->hw.fc.pause_time = FCPAUSE;
292         adapter->hw.fc.send_xon = TRUE;
293         adapter->hw.fc.type = FLOW_CONTROL;
294
295
296         /* Set the max frame size assuming standard ethernet sized frames */
297         adapter->hw.max_frame_size =
298                 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
299
300         if (ixgb_allocate_pci_resources(adapter)) {
301                 device_printf(dev, "Allocation of PCI resources failed\n");
302                 error = ENXIO;
303                 goto err_pci;
304         }
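        /* Descriptor ring sizes are rounded up to a multiple of 4 KB. */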
305         tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
306                              sizeof(struct ixgb_tx_desc), 4096);
307
308         /* Allocate Transmit Descriptor ring */
309         if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
310                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
311                 error = ENOMEM;
312                 goto err_tx_desc;
313         }
314         adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
315
316         rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
317                              sizeof(struct ixgb_rx_desc), 4096);
318
319         /* Allocate Receive Descriptor ring */
320         if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
321                 device_printf(dev, "Unable to allocate rx_desc memory\n");
322                 error = ENOMEM;
323                 goto err_rx_desc;
324         }
325         adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
326
327         /* Allocate multicast array memory. */
328         adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
329             MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
330         if (adapter->mta == NULL) {
331                 device_printf(dev, "Can not allocate multicast setup array\n");
332                 error = ENOMEM;
333                 goto err_hw_init;
334         }
335
336         /* Initialize the hardware */
337         if (ixgb_hardware_init(adapter)) {
338                 device_printf(dev, "Unable to initialize the hardware\n");
339                 error = EIO;
340                 goto err_hw_init;
341         }
342         /* Setup OS specific network interface */
        if (ixgb_setup_interface(dev, adapter) != 0) {
                error = ENXIO;
                goto err_hw_init;
        }
345
346         /* Initialize statistics */
347         ixgb_clear_hw_cntrs(&adapter->hw);
348         ixgb_update_stats_counters(adapter);
349
350         INIT_DEBUGOUT("ixgb_attach: end");
351         return (0);
352
353 err_hw_init:
354         ixgb_dma_free(adapter, &adapter->rxdma);
355 err_rx_desc:
356         ixgb_dma_free(adapter, &adapter->txdma);
357 err_tx_desc:
358 err_pci:
359         if (adapter->ifp != NULL)
360                 if_free(adapter->ifp);
361         ixgb_free_pci_resources(adapter);
362         sysctl_ctx_free(&adapter->sysctl_ctx);
363         free(adapter->mta, M_DEVBUF);
364         return (error);
365
366 }
367
368 /*********************************************************************
369  *  Device removal routine
370  *
371  *  The detach entry point is called when the driver is being removed.
372  *  This routine stops the adapter and deallocates all the resources
373  *  that were allocated for driver operation.
374  *
375  *  return 0 on success, positive on failure
376  *********************************************************************/
377
378 static int
379 ixgb_detach(device_t dev)
380 {
381         struct adapter *adapter = device_get_softc(dev);
382         struct ifnet   *ifp = adapter->ifp;
383
384         INIT_DEBUGOUT("ixgb_detach: begin");
385
386 #ifdef DEVICE_POLLING
387         if (ifp->if_capenable & IFCAP_POLLING)
388                 ether_poll_deregister(ifp);
389 #endif
390
391         IXGB_LOCK(adapter);
392         adapter->in_detach = 1;
393
394         ixgb_stop(adapter);
395         IXGB_UNLOCK(adapter);
396
397 #if __FreeBSD_version < 500000
398         ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
399 #else
400         ether_ifdetach(ifp);
401 #endif
402         callout_drain(&adapter->timer);
403         ixgb_free_pci_resources(adapter);
404 #if __FreeBSD_version >= 500000
405         if_free(ifp);
406 #endif
407
408         /* Free Transmit Descriptor ring */
409         if (adapter->tx_desc_base) {
410                 ixgb_dma_free(adapter, &adapter->txdma);
411                 adapter->tx_desc_base = NULL;
412         }
413         /* Free Receive Descriptor ring */
414         if (adapter->rx_desc_base) {
415                 ixgb_dma_free(adapter, &adapter->rxdma);
416                 adapter->rx_desc_base = NULL;
417         }
418         /* Remove from the adapter list */
419         if (ixgb_adapter_list == adapter)
420                 ixgb_adapter_list = adapter->next;
421         if (adapter->next != NULL)
422                 adapter->next->prev = adapter->prev;
423         if (adapter->prev != NULL)
424                 adapter->prev->next = adapter->next;
425         free(adapter->mta, M_DEVBUF);
426
427         IXGB_LOCK_DESTROY(adapter);
428         return (0);
429 }
430
431 /*********************************************************************
432  *
433  *  Shutdown entry point
434  *
435  **********************************************************************/
436
437 static int
438 ixgb_shutdown(device_t dev)
439 {
440         struct adapter *adapter = device_get_softc(dev);
441         IXGB_LOCK(adapter);
442         ixgb_stop(adapter);
443         IXGB_UNLOCK(adapter);
444         return (0);
445 }
446
447
448 /*********************************************************************
449  *  Transmit entry point
450  *
451  *  ixgb_start is called by the stack to initiate a transmit.
452  *  The driver will remain in this routine as long as there are
453  *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
456  **********************************************************************/
457
458 static void
459 ixgb_start_locked(struct ifnet * ifp)
460 {
461         struct mbuf    *m_head;
462         struct adapter *adapter = ifp->if_softc;
463
464         IXGB_LOCK_ASSERT(adapter);
465
466         if (!adapter->link_active)
467                 return;
468
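        /*
         * Drain the send queue; if a frame cannot be mapped onto TX
         * descriptors, requeue it and mark the interface OACTIVE.
         */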
469         while (ifp->if_snd.ifq_head != NULL) {
470                 IF_DEQUEUE(&ifp->if_snd, m_head);
471
472                 if (m_head == NULL)
473                         break;
474
475                 if (ixgb_encap(adapter, m_head)) {
476                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
477                         IF_PREPEND(&ifp->if_snd, m_head);
478                         break;
479                 }
480                 /* Send a copy of the frame to the BPF listener */
481 #if __FreeBSD_version < 500000
482                 if (ifp->if_bpf)
483                         bpf_mtap(ifp, m_head);
484 #else
485                 ETHER_BPF_MTAP(ifp, m_head);
486 #endif
487                 /* Set timeout in case hardware has problems transmitting */
488                 adapter->tx_timer = IXGB_TX_TIMEOUT;
489
490         }
491         return;
492 }
493
494 static void
495 ixgb_start(struct ifnet *ifp)
496 {
497         struct adapter *adapter = ifp->if_softc;
498
499         IXGB_LOCK(adapter);
500         ixgb_start_locked(ifp);
501         IXGB_UNLOCK(adapter);
502         return;
503 }
504
505 /*********************************************************************
506  *  Ioctl entry point
507  *
508  *  ixgb_ioctl is called when the user wants to configure the
509  *  interface.
510  *
511  *  return 0 on success, positive on failure
512  **********************************************************************/
513
514 static int
515 ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
516 {
517         int             mask, error = 0;
518         struct ifreq   *ifr = (struct ifreq *) data;
519         struct adapter *adapter = ifp->if_softc;
520
521         if (adapter->in_detach)
522                 goto out;
523
524         switch (command) {
525         case SIOCSIFADDR:
526         case SIOCGIFADDR:
527                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
528                 ether_ioctl(ifp, command, data);
529                 break;
530         case SIOCSIFMTU:
531                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
532                 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
533                         error = EINVAL;
534                 } else {
535                         IXGB_LOCK(adapter);
536                         ifp->if_mtu = ifr->ifr_mtu;
537                         adapter->hw.max_frame_size =
538                                 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
539
540                         ixgb_init_locked(adapter);
541                         IXGB_UNLOCK(adapter);
542                 }
543                 break;
544         case SIOCSIFFLAGS:
545                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
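                /*
                 * Bring the interface up or down as requested and re-sync
                 * promiscuous/allmulti state.
                 */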
546                 IXGB_LOCK(adapter);
547                 if (ifp->if_flags & IFF_UP) {
548                         if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
549                                 ixgb_init_locked(adapter);
550                         }
551                         ixgb_disable_promisc(adapter);
552                         ixgb_set_promisc(adapter);
553                 } else {
554                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
555                                 ixgb_stop(adapter);
556                         }
557                 }
558                 IXGB_UNLOCK(adapter);
559                 break;
560         case SIOCADDMULTI:
561         case SIOCDELMULTI:
562                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
563                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
564                         IXGB_LOCK(adapter);
565                         ixgb_disable_intr(adapter);
566                         ixgb_set_multi(adapter);
567                         ixgb_enable_intr(adapter);
568                         IXGB_UNLOCK(adapter);
569                 }
570                 break;
571         case SIOCSIFMEDIA:
572         case SIOCGIFMEDIA:
573                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
574                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
575                 break;
576         case SIOCSIFCAP:
577                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
578                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
579 #ifdef DEVICE_POLLING
580                 if (mask & IFCAP_POLLING) {
581                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
582                                 error = ether_poll_register(ixgb_poll, ifp);
583                                 if (error)
584                                         return(error);
585                                 IXGB_LOCK(adapter);
586                                 ixgb_disable_intr(adapter);
587                                 ifp->if_capenable |= IFCAP_POLLING;
588                                 IXGB_UNLOCK(adapter);
589                         } else {
590                                 error = ether_poll_deregister(ifp);
591                                 /* Enable interrupt even in error case */
592                                 IXGB_LOCK(adapter);
593                                 ixgb_enable_intr(adapter);
594                                 ifp->if_capenable &= ~IFCAP_POLLING;
595                                 IXGB_UNLOCK(adapter);
596                         }
597                 }
598 #endif /* DEVICE_POLLING */
599                 if (mask & IFCAP_HWCSUM) {
600                         if (IFCAP_HWCSUM & ifp->if_capenable)
601                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
602                         else
603                                 ifp->if_capenable |= IFCAP_HWCSUM;
604                         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
605                                 ixgb_init(adapter);
606                 }
607                 break;
608         default:
609                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
610                 error = EINVAL;
611         }
612
613 out:
614         return (error);
615 }
616
617 /*********************************************************************
618  *  Watchdog entry point
619  *
620  *  This routine is called whenever hardware quits transmitting.
621  *
622  **********************************************************************/
623
624 static void
625 ixgb_watchdog(struct adapter *adapter)
626 {
627         struct ifnet *ifp;
628
629         ifp = adapter->ifp;
630
631         /*
632          * If we are in this routine because of pause frames, then don't
633          * reset the hardware.
634          */
635         if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
636                 adapter->tx_timer = IXGB_TX_TIMEOUT;
637                 return;
638         }
639         if_printf(ifp, "watchdog timeout -- resetting\n");
640
641         ixgb_stop(adapter);
642         ixgb_init_locked(adapter);
643
644
645         ifp->if_oerrors++;
646
647         return;
648 }
649
650 /*********************************************************************
651  *  Init entry point
652  *
 *  This routine is used in two ways. It is used by the stack as the
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hardware/software initialization routine
 *  to bring the adapter to a consistent state.
657  *
658  *  return 0 on success, positive on failure
659  **********************************************************************/
660
661 static void
662 ixgb_init_locked(struct adapter *adapter)
663 {
664         struct ifnet   *ifp;
665
666         INIT_DEBUGOUT("ixgb_init: begin");
667
668         IXGB_LOCK_ASSERT(adapter);
669
670         ixgb_stop(adapter);
671         ifp = adapter->ifp;
672
        /* Get the latest MAC address; the user may have configured a locally administered address (LAA) */
674         bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
675             IXGB_ETH_LENGTH_OF_ADDRESS);
676
677         /* Initialize the hardware */
678         if (ixgb_hardware_init(adapter)) {
679                 if_printf(ifp, "Unable to initialize the hardware\n");
680                 return;
681         }
682         ixgb_enable_vlans(adapter);
683
684         /* Prepare transmit descriptors and buffers */
685         if (ixgb_setup_transmit_structures(adapter)) {
686                 if_printf(ifp, "Could not setup transmit structures\n");
687                 ixgb_stop(adapter);
688                 return;
689         }
690         ixgb_initialize_transmit_unit(adapter);
691
692         /* Setup Multicast table */
693         ixgb_set_multi(adapter);
694
695         /* Prepare receive descriptors and buffers */
696         if (ixgb_setup_receive_structures(adapter)) {
697                 if_printf(ifp, "Could not setup receive structures\n");
698                 ixgb_stop(adapter);
699                 return;
700         }
701         ixgb_initialize_receive_unit(adapter);
702
703         /* Don't lose promiscuous settings */
704         ixgb_set_promisc(adapter);
705
706         ifp = adapter->ifp;
707         ifp->if_drv_flags |= IFF_DRV_RUNNING;
708         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
709
710
711         if (ifp->if_capenable & IFCAP_TXCSUM)
712                 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
713         else
714                 ifp->if_hwassist = 0;
715
716
717         /* Enable jumbo frames */
718         if (ifp->if_mtu > ETHERMTU) {
719                 uint32_t        temp_reg;
720                 IXGB_WRITE_REG(&adapter->hw, MFS,
721                                adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
722                 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
723                 temp_reg |= IXGB_CTRL0_JFE;
724                 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
725         }
726         callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
727         ixgb_clear_hw_cntrs(&adapter->hw);
728 #ifdef DEVICE_POLLING
729         /*
730          * Only disable interrupts if we are polling, make sure they are on
731          * otherwise.
732          */
733         if (ifp->if_capenable & IFCAP_POLLING)
734                 ixgb_disable_intr(adapter);
735         else
736 #endif
737                 ixgb_enable_intr(adapter);
738
739         return;
740 }
741
742 static void
743 ixgb_init(void *arg)
744 {
745         struct adapter *adapter = arg;
746
747         IXGB_LOCK(adapter);
748         ixgb_init_locked(adapter);
749         IXGB_UNLOCK(adapter);
750         return;
751 }
752
753 #ifdef DEVICE_POLLING
754 static int
755 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
756 {
757         struct adapter *adapter = ifp->if_softc;
758         u_int32_t       reg_icr;
759         int             rx_npkts;
760
761         IXGB_LOCK_ASSERT(adapter);
762
763         if (cmd == POLL_AND_CHECK_STATUS) {
764                 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
765                 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
766                         ixgb_check_for_link(&adapter->hw);
767                         ixgb_print_link_status(adapter);
768                 }
769         }
770         rx_npkts = ixgb_process_receive_interrupts(adapter, count);
771         ixgb_clean_transmit_interrupts(adapter);
772
773         if (ifp->if_snd.ifq_head != NULL)
774                 ixgb_start_locked(ifp);
775         return (rx_npkts);
776 }
777
778 static int
779 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
780 {
781         struct adapter *adapter = ifp->if_softc;
782         int rx_npkts = 0;
783
784         IXGB_LOCK(adapter);
785         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
786                 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
787         IXGB_UNLOCK(adapter);
788         return (rx_npkts);
789 }
790 #endif /* DEVICE_POLLING */
791
792 /*********************************************************************
793  *
794  *  Interrupt Service routine
795  *
796  **********************************************************************/
797
798 static void
799 ixgb_intr(void *arg)
800 {
801         u_int32_t       loop_cnt = IXGB_MAX_INTR;
802         u_int32_t       reg_icr;
803         struct ifnet   *ifp;
804         struct adapter *adapter = arg;
805         boolean_t       rxdmt0 = FALSE;
806
807         IXGB_LOCK(adapter);
808
809         ifp = adapter->ifp;
810
811 #ifdef DEVICE_POLLING
812         if (ifp->if_capenable & IFCAP_POLLING) {
813                 IXGB_UNLOCK(adapter);
814                 return;
815         }
816 #endif
817
818         reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
819         if (reg_icr == 0) {
820                 IXGB_UNLOCK(adapter);
821                 return;
822         }
823
824         if (reg_icr & IXGB_INT_RXDMT0)
825                 rxdmt0 = TRUE;
826
827 #ifdef _SV_
828         if (reg_icr & IXGB_INT_RXDMT0)
829                 adapter->sv_stats.icr_rxdmt0++;
830         if (reg_icr & IXGB_INT_RXO)
831                 adapter->sv_stats.icr_rxo++;
832         if (reg_icr & IXGB_INT_RXT0)
833                 adapter->sv_stats.icr_rxt0++;
834         if (reg_icr & IXGB_INT_TXDW)
835                 adapter->sv_stats.icr_TXDW++;
836 #endif                          /* _SV_ */
837
838         /* Link status change */
839         if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
840                 ixgb_check_for_link(&adapter->hw);
841                 ixgb_print_link_status(adapter);
842         }
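        /* Service RX and TX for at most IXGB_MAX_INTR passes per interrupt. */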
843         while (loop_cnt > 0) {
844                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
845                         ixgb_process_receive_interrupts(adapter, -1);
846                         ixgb_clean_transmit_interrupts(adapter);
847                 }
848                 loop_cnt--;
849         }
850
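        /*
         * RXDMT0 (receive descriptor minimum threshold) fired: when RAIDC
         * is enabled, mask and immediately re-enable it.
         */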
851         if (rxdmt0 && adapter->raidc) {
852                 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
853                 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
854         }
855         if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
856                 ixgb_start_locked(ifp);
857
858         IXGB_UNLOCK(adapter);
859         return;
860 }
861
862
863 /*********************************************************************
864  *
865  *  Media Ioctl callback
866  *
867  *  This routine is called whenever the user queries the status of
868  *  the interface using ifconfig.
869  *
870  **********************************************************************/
871 static void
872 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
873 {
874         struct adapter *adapter = ifp->if_softc;
875
876         INIT_DEBUGOUT("ixgb_media_status: begin");
877
878         ixgb_check_for_link(&adapter->hw);
879         ixgb_print_link_status(adapter);
880
881         ifmr->ifm_status = IFM_AVALID;
882         ifmr->ifm_active = IFM_ETHER;
883
884         if (!adapter->hw.link_up)
885                 return;
886
887         ifmr->ifm_status |= IFM_ACTIVE;
888         ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
889
890         return;
891 }
892
893 /*********************************************************************
894  *
895  *  Media Ioctl callback
896  *
897  *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options of ifconfig.
899  *
900  **********************************************************************/
901 static int
902 ixgb_media_change(struct ifnet * ifp)
903 {
904         struct adapter *adapter = ifp->if_softc;
905         struct ifmedia *ifm = &adapter->media;
906
907         INIT_DEBUGOUT("ixgb_media_change: begin");
908
909         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
910                 return (EINVAL);
911
912         return (0);
913 }
914
915 /*********************************************************************
916  *
917  *  This routine maps the mbufs to tx descriptors.
918  *
919  *  return 0 on success, positive on failure
920  **********************************************************************/
921
922 static int
923 ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
924 {
925         u_int8_t        txd_popts;
926         int             i, j, error, nsegs;
927
#if __FreeBSD_version < 500000
        struct ifvlan  *ifv = NULL;
#elif __FreeBSD_version < 700000
        struct m_tag   *mtag = NULL;
#endif
931         bus_dma_segment_t segs[IXGB_MAX_SCATTER];
932         bus_dmamap_t    map;
933         struct ixgb_buffer *tx_buffer = NULL;
934         struct ixgb_tx_desc *current_tx_desc = NULL;
935         struct ifnet   *ifp = adapter->ifp;
936
937         /*
938          * Force a cleanup if number of TX descriptors available hits the
939          * threshold
940          */
941         if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
942                 ixgb_clean_transmit_interrupts(adapter);
943         }
944         if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
945                 adapter->no_tx_desc_avail1++;
946                 return (ENOBUFS);
947         }
948         /*
949          * Map the packet for DMA.
950          */
951         if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
952                 adapter->no_tx_map_avail++;
953                 return (ENOMEM);
954         }
955         error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
956                                         &nsegs, BUS_DMA_NOWAIT);
957         if (error != 0) {
958                 adapter->no_tx_dma_setup++;
                if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf_sg failed; "
960                        "error %u\n", error);
961                 bus_dmamap_destroy(adapter->txtag, map);
962                 return (error);
963         }
964         KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
965
966         if (nsegs > adapter->num_tx_desc_avail) {
967                 adapter->no_tx_desc_avail2++;
968                 bus_dmamap_destroy(adapter->txtag, map);
969                 return (ENOBUFS);
970         }
971         if (ifp->if_hwassist > 0) {
972                 ixgb_transmit_checksum_setup(adapter, m_head,
973                                              &txd_popts);
974         } else
975                 txd_popts = 0;
976
977         /* Find out if we are in vlan mode */
978 #if __FreeBSD_version < 500000
979         if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
980             m_head->m_pkthdr.rcvif != NULL &&
981             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
982                 ifv = m_head->m_pkthdr.rcvif->if_softc;
#elif __FreeBSD_version < 700000
984         mtag = VLAN_OUTPUT_TAG(ifp, m_head);
985 #endif
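        /* Fill one TX descriptor per DMA segment, wrapping at the end of the ring. */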
986         i = adapter->next_avail_tx_desc;
987         for (j = 0; j < nsegs; j++) {
988                 tx_buffer = &adapter->tx_buffer_area[i];
989                 current_tx_desc = &adapter->tx_desc_base[i];
990
991                 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
992                 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
993                 current_tx_desc->popts = txd_popts;
994                 if (++i == adapter->num_tx_desc)
995                         i = 0;
996
997                 tx_buffer->m_head = NULL;
998         }
999
1000         adapter->num_tx_desc_avail -= nsegs;
1001         adapter->next_avail_tx_desc = i;
1002
1003 #if __FreeBSD_version < 500000
1004         if (ifv != NULL) {
1005                 /* Set the vlan id */
1006                 current_tx_desc->vlan = ifv->ifv_tag;
#elif __FreeBSD_version < 700000
1008         if (mtag != NULL) {
1009                 /* Set the vlan id */
1010                 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1011 #else
1012         if (m_head->m_flags & M_VLANTAG) {
1013                 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1014 #endif
1015
1016                 /* Tell hardware to add tag */
1017                 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1018         }
1019         tx_buffer->m_head = m_head;
1020         tx_buffer->map = map;
1021         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1022
1023         /*
1024          * Last Descriptor of Packet needs End Of Packet (EOP)
1025          */
1026         current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1027
1028         /*
         * Advance the Transmit Descriptor Tail (TDT); this tells the
         * hardware that this frame is available to transmit.
1031          */
1032         IXGB_WRITE_REG(&adapter->hw, TDT, i);
1033
1034         return (0);
1035 }
1036
1037 static void
1038 ixgb_set_promisc(struct adapter * adapter)
1039 {
1040
1041         u_int32_t       reg_rctl;
1042         struct ifnet   *ifp = adapter->ifp;
1043
1044         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1045
1046         if (ifp->if_flags & IFF_PROMISC) {
1047                 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1048                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1049         } else if (ifp->if_flags & IFF_ALLMULTI) {
1050                 reg_rctl |= IXGB_RCTL_MPE;
1051                 reg_rctl &= ~IXGB_RCTL_UPE;
1052                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1053         }
1054         return;
1055 }
1056
1057 static void
1058 ixgb_disable_promisc(struct adapter * adapter)
1059 {
1060         u_int32_t       reg_rctl;
1061
1062         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1063
1064         reg_rctl &= (~IXGB_RCTL_UPE);
1065         reg_rctl &= (~IXGB_RCTL_MPE);
1066         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1067
1068         return;
1069 }
1070
1071
1072 /*********************************************************************
1073  *  Multicast Update
1074  *
 *  This routine is called whenever the multicast address list is updated.
1076  *
1077  **********************************************************************/
1078
1079 static void
1080 ixgb_set_multi(struct adapter * adapter)
1081 {
1082         u_int32_t       reg_rctl = 0;
1083         u_int8_t        *mta;
1084         struct ifmultiaddr *ifma;
1085         int             mcnt = 0;
1086         struct ifnet   *ifp = adapter->ifp;
1087
1088         IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1089
1090         mta = adapter->mta;
1091         bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1092             MAX_NUM_MULTICAST_ADDRESSES);
1093
1094         if_maddr_rlock(ifp);
1095 #if __FreeBSD_version < 500000
1096         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1097 #else
1098         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1099 #endif
1100                 if (ifma->ifma_addr->sa_family != AF_LINK)
1101                         continue;
1102
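                /*
                 * Copy at most MAX_NUM_MULTICAST_ADDRESSES entries into mta,
                 * but keep counting so the all-multicast fallback below
                 * still triggers.
                 */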
                if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
                        bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
                            &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS],
                            IXGB_ETH_LENGTH_OF_ADDRESS);
                mcnt++;
1106         }
1107         if_maddr_runlock(ifp);
1108
1109         if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1110                 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1111                 reg_rctl |= IXGB_RCTL_MPE;
1112                 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1113         } else
1114                 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1115
1116         return;
1117 }
1118
1119
1120 /*********************************************************************
1121  *  Timer routine
1122  *
1123  *  This routine checks for link status and updates statistics.
1124  *
1125  **********************************************************************/
1126
1127 static void
1128 ixgb_local_timer(void *arg)
1129 {
1130         struct ifnet   *ifp;
1131         struct adapter *adapter = arg;
1132         ifp = adapter->ifp;
1133
1134         IXGB_LOCK_ASSERT(adapter);
1135
1136         ixgb_check_for_link(&adapter->hw);
1137         ixgb_print_link_status(adapter);
1138         ixgb_update_stats_counters(adapter);
1139         if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1140                 ixgb_print_hw_stats(adapter);
1141         }
1142         if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1143                 ixgb_watchdog(adapter);
1144         callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
1145 }
1146
1147 static void
1148 ixgb_print_link_status(struct adapter * adapter)
1149 {
1150         if (adapter->hw.link_up) {
1151                 if (!adapter->link_active) {
1152                         if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1153                                10000,
1154                                "Full Duplex");
1155                         adapter->link_active = 1;
1156                 }
1157         } else {
1158                 if (adapter->link_active) {
1159                         if_printf(adapter->ifp, "Link is Down \n");
1160                         adapter->link_active = 0;
1161                 }
1162         }
1163
1164         return;
1165 }
1166
1167
1168
1169 /*********************************************************************
1170  *
1171  *  This routine disables all traffic on the adapter by issuing a
1172  *  global reset on the MAC and deallocates TX/RX buffers.
1173  *
1174  **********************************************************************/
1175
1176 static void
1177 ixgb_stop(void *arg)
1178 {
1179         struct ifnet   *ifp;
1180         struct adapter *adapter = arg;
1181         ifp = adapter->ifp;
1182
1183         IXGB_LOCK_ASSERT(adapter);
1184
1185         INIT_DEBUGOUT("ixgb_stop: begin\n");
1186         ixgb_disable_intr(adapter);
1187         adapter->hw.adapter_stopped = FALSE;
1188         ixgb_adapter_stop(&adapter->hw);
1189         callout_stop(&adapter->timer);
1190         ixgb_free_transmit_structures(adapter);
1191         ixgb_free_receive_structures(adapter);
1192
1193         /* Tell the stack that the interface is no longer active */
1194         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1195         adapter->tx_timer = 0;
1196
1197         return;
1198 }
1199
1200
1201 /*********************************************************************
1202  *
1203  *  Determine hardware revision.
1204  *
1205  **********************************************************************/
1206 static void
1207 ixgb_identify_hardware(struct adapter * adapter)
1208 {
1209         device_t        dev = adapter->dev;
1210
1211         /* Make sure our PCI config space has the necessary stuff set */
1212         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1213         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1214               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1215                 device_printf(dev,
1216                     "Memory Access and/or Bus Master bits were not set!\n");
1217                 adapter->hw.pci_cmd_word |=
1218                         (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1219                 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1220         }
1221         /* Save off the information about this board */
1222         adapter->hw.vendor_id = pci_get_vendor(dev);
1223         adapter->hw.device_id = pci_get_device(dev);
1224         adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1225         adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1226         adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1227
1228         /* Set MacType, etc. based on this PCI info */
1229         switch (adapter->hw.device_id) {
1230         case IXGB_DEVICE_ID_82597EX:
1231         case IXGB_DEVICE_ID_82597EX_SR:
1232                 adapter->hw.mac_type = ixgb_82597;
1233                 break;
1234         default:
                INIT_DEBUGOUT1("Unknown device id 0x%x", adapter->hw.device_id);
1236                 device_printf(dev, "unsupported device id 0x%x\n",
1237                     adapter->hw.device_id);
1238         }
1239
1240         return;
1241 }
1242
1243 static int
1244 ixgb_allocate_pci_resources(struct adapter * adapter)
1245 {
1246         int             rid;
1247         device_t        dev = adapter->dev;
1248
1249         rid = IXGB_MMBA;
1250         adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
1251                                                  &rid, 0, ~0, 1,
1252                                                  RF_ACTIVE);
1253         if (!(adapter->res_memory)) {
1254                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1255                 return (ENXIO);
1256         }
1257         adapter->osdep.mem_bus_space_tag =
1258                 rman_get_bustag(adapter->res_memory);
1259         adapter->osdep.mem_bus_space_handle =
1260                 rman_get_bushandle(adapter->res_memory);
1261         adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1262
1263         rid = 0x0;
1264         adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
1265                                                     &rid, 0, ~0, 1,
1266                                                   RF_SHAREABLE | RF_ACTIVE);
1267         if (!(adapter->res_interrupt)) {
1268                 device_printf(dev,
1269                     "Unable to allocate bus resource: interrupt\n");
1270                 return (ENXIO);
1271         }
1272         if (bus_setup_intr(dev, adapter->res_interrupt,
1273                            INTR_TYPE_NET | INTR_MPSAFE,
1274                            NULL, (void (*) (void *))ixgb_intr, adapter,
1275                            &adapter->int_handler_tag)) {
1276                 device_printf(dev, "Error registering interrupt handler!\n");
1277                 return (ENXIO);
1278         }
1279         adapter->hw.back = &adapter->osdep;
1280
1281         return (0);
1282 }
1283
1284 static void
1285 ixgb_free_pci_resources(struct adapter * adapter)
1286 {
1287         device_t        dev = adapter->dev;
1288
1289         if (adapter->res_interrupt != NULL) {
1290                 bus_teardown_intr(dev, adapter->res_interrupt,
1291                                   adapter->int_handler_tag);
1292                 bus_release_resource(dev, SYS_RES_IRQ, 0,
1293                                      adapter->res_interrupt);
1294         }
1295         if (adapter->res_memory != NULL) {
1296                 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1297                                      adapter->res_memory);
1298         }
1299         if (adapter->res_ioport != NULL) {
1300                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1301                                      adapter->res_ioport);
1302         }
1303         return;
1304 }
1305
1306 /*********************************************************************
1307  *
1308  *  Initialize the hardware to a configuration as specified by the
1309  *  adapter structure. The controller is reset, the EEPROM is
1310  *  verified, the MAC address is set, then the shared initialization
1311  *  routines are called.
1312  *
1313  **********************************************************************/
1314 static int
1315 ixgb_hardware_init(struct adapter * adapter)
1316 {
1317         /* Issue a global reset */
1318         adapter->hw.adapter_stopped = FALSE;
1319         ixgb_adapter_stop(&adapter->hw);
1320
1321         /* Make sure we have a good EEPROM before we read from it */
1322         if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1323                 device_printf(adapter->dev,
1324                     "The EEPROM Checksum Is Not Valid\n");
1325                 return (EIO);
1326         }
1327         if (!ixgb_init_hw(&adapter->hw)) {
                device_printf(adapter->dev, "Hardware Initialization Failed\n");
1329                 return (EIO);
1330         }
1331
1332         return (0);
1333 }
1334
1335 /*********************************************************************
1336  *
1337  *  Setup networking device structure and register an interface.
1338  *
1339  **********************************************************************/
1340 static int
1341 ixgb_setup_interface(device_t dev, struct adapter * adapter)
1342 {
1343         struct ifnet   *ifp;
1344         INIT_DEBUGOUT("ixgb_setup_interface: begin");
1345
1346         ifp = adapter->ifp = if_alloc(IFT_ETHER);
1347         if (ifp == NULL) {
1348                 device_printf(dev, "can not allocate ifnet structure\n");
1349                 return (-1);
1350         }
1351 #if __FreeBSD_version >= 502000
1352         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1353 #else
1354         ifp->if_unit = device_get_unit(dev);
1355         ifp->if_name = "ixgb";
1356 #endif
1357         ifp->if_mtu = ETHERMTU;
1358         ifp->if_baudrate = 1000000000;
1359         ifp->if_init = ixgb_init;
1360         ifp->if_softc = adapter;
1361         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1362         ifp->if_ioctl = ixgb_ioctl;
1363         ifp->if_start = ixgb_start;
1364         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1365
1366 #if __FreeBSD_version < 500000
1367         ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1368 #else
1369         ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1370 #endif
1371
1372         ifp->if_capabilities = IFCAP_HWCSUM;
1373
1374         /*
1375          * Tell the upper layer(s) we support long frames.
1376          */
1377         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1378
1379 #if __FreeBSD_version >= 500000
1380         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1381 #endif
1382
1383         ifp->if_capenable = ifp->if_capabilities;
1384
1385 #ifdef DEVICE_POLLING
1386         ifp->if_capabilities |= IFCAP_POLLING;
1387 #endif
1388
1389         /*
1390          * Specify the media types supported by this adapter and register
1391          * callbacks to update media and link information
1392          */
1393         ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1394                      ixgb_media_status);
1395         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1396                     0, NULL);
1397         ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1398                     0, NULL);
1399         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1400         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1401
1402         return (0);
1403 }
1404
1405 /********************************************************************
1406  * Manage DMA'able memory.
1407  *******************************************************************/
1408 static void
1409 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1410 {
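        /* bus_dmamap_load() callback: record the single segment's bus address. */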
1411         if (error)
1412                 return;
1413         *(bus_addr_t *) arg = segs->ds_addr;
1414         return;
1415 }
1416
1417 static int
1418 ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1419                 struct ixgb_dma_alloc * dma, int mapflags)
1420 {
1421         device_t dev;
1422         int             r;
1423
1424         dev = adapter->dev;
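        /* One page-aligned, physically contiguous segment per allocation. */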
1425         r = bus_dma_tag_create(bus_get_dma_tag(dev),    /* parent */
1426                                PAGE_SIZE, 0,    /* alignment, bounds */
1427                                BUS_SPACE_MAXADDR,       /* lowaddr */
1428                                BUS_SPACE_MAXADDR,       /* highaddr */
1429                                NULL, NULL,      /* filter, filterarg */
1430                                size,    /* maxsize */
1431                                1,       /* nsegments */
1432                                size,    /* maxsegsize */
1433                                BUS_DMA_ALLOCNOW,        /* flags */
1434 #if __FreeBSD_version >= 502000
1435                                NULL,    /* lockfunc */
1436                                NULL,    /* lockfuncarg */
1437 #endif
1438                                &dma->dma_tag);
1439         if (r != 0) {
1440                 device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1441                        "error %u\n", r);
1442                 goto fail_0;
1443         }
1444         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1445                              BUS_DMA_NOWAIT, &dma->dma_map);
1446         if (r != 0) {
1447                 device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1448                        "error %u\n", r);
1449                 goto fail_1;
1450         }
1451         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1452                             size,
1453                             ixgb_dmamap_cb,
1454                             &dma->dma_paddr,
1455                             mapflags | BUS_DMA_NOWAIT);
1456         if (r != 0) {
1457                 device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1458                        "error %u\n", r);
1459                 goto fail_2;
1460         }
1461         dma->dma_size = size;
1462         return (0);
1463 fail_2:
1464         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1465 fail_1:
1466         bus_dma_tag_destroy(dma->dma_tag);
1467 fail_0:
1468         dma->dma_map = NULL;
1469         dma->dma_tag = NULL;
1470         return (r);
1471 }
1472
1473
1474
1475 static void
1476 ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1477 {
1478         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1479         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1480         bus_dma_tag_destroy(dma->dma_tag);
1481 }
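
/*
 * Rough usage sketch (illustrative only, not lifted from this file): a
 * ring of "size" bytes is typically set up and torn down as
 *
 *      struct ixgb_dma_alloc dma;
 *
 *      if (ixgb_dma_malloc(adapter, size, &dma, BUS_DMA_NOWAIT) == 0) {
 *              ... use dma.dma_vaddr / dma.dma_paddr ...
 *              ixgb_dma_free(adapter, &dma);
 *      }
 */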
1482
1483 /*********************************************************************
1484  *
1485  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1486  *  the information needed to transmit a packet on the wire.
1487  *
1488  **********************************************************************/
1489 static int
1490 ixgb_allocate_transmit_structures(struct adapter * adapter)
1491 {
1492         if (!(adapter->tx_buffer_area =
1493               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1494                                             adapter->num_tx_desc, M_DEVBUF,
1495                                             M_NOWAIT | M_ZERO))) {
1496                 device_printf(adapter->dev,
1497                     "Unable to allocate tx_buffer memory\n");
1498                 return ENOMEM;
1499         }
1500         bzero(adapter->tx_buffer_area,
1501               sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1502
1503         return 0;
1504 }
1505
1506 /*********************************************************************
1507  *
1508  *  Allocate and initialize transmit structures.
1509  *
1510  **********************************************************************/
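/*
 * The TX tag created below bounds a single packet mapping: at most
 * IXGB_MAX_SCATTER segments of up to MCLBYTES each, i.e. a frame may
 * occupy MCLBYTES * IXGB_MAX_SCATTER bytes of cluster memory in total.
 */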
1511 static int
1512 ixgb_setup_transmit_structures(struct adapter * adapter)
1513 {
1514         /*
1515          * Setup DMA descriptor areas.
1516          */
1517         if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
1518                                PAGE_SIZE, 0,    /* alignment, bounds */
1519                                BUS_SPACE_MAXADDR,       /* lowaddr */
1520                                BUS_SPACE_MAXADDR,       /* highaddr */
1521                                NULL, NULL,      /* filter, filterarg */
1522                                MCLBYTES * IXGB_MAX_SCATTER,     /* maxsize */
1523                                IXGB_MAX_SCATTER,        /* nsegments */
1524                                MCLBYTES,        /* maxsegsize */
1525                                BUS_DMA_ALLOCNOW,        /* flags */
1526 #if __FreeBSD_version >= 502000
1527                                NULL,    /* lockfunc */
1528                                NULL,    /* lockfuncarg */
1529 #endif
1530                                &adapter->txtag)) {
1531                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1532                 return (ENOMEM);
1533         }
1534         if (ixgb_allocate_transmit_structures(adapter))
1535                 return ENOMEM;
1536
1537         bzero((void *)adapter->tx_desc_base,
1538               (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
1539
1540         adapter->next_avail_tx_desc = 0;
1541         adapter->oldest_used_tx_desc = 0;
1542
1543         /* Set number of descriptors available */
1544         adapter->num_tx_desc_avail = adapter->num_tx_desc;
1545
1546         /* Set checksum context */
1547         adapter->active_checksum_context = OFFLOAD_NONE;
1548
1549         return 0;
1550 }
1551
1552 /*********************************************************************
1553  *
1554  *  Enable transmit unit.
1555  *
1556  **********************************************************************/
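/*
 * The 64-bit physical address of the TX ring is split across TDBAL/TDBAH,
 * TDLEN holds the ring size in bytes, and TDH/TDT both start at zero so
 * the ring is empty until the transmit path advances the tail pointer.
 */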
1557 static void
1558 ixgb_initialize_transmit_unit(struct adapter * adapter)
1559 {
1560         u_int32_t       reg_tctl;
1561         u_int64_t       tdba = adapter->txdma.dma_paddr;
1562
1563         /* Setup the Base and Length of the Tx Descriptor Ring */
1564         IXGB_WRITE_REG(&adapter->hw, TDBAL,
1565                        (tdba & 0x00000000ffffffffULL));
1566         IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1567         IXGB_WRITE_REG(&adapter->hw, TDLEN,
1568                        adapter->num_tx_desc *
1569                        sizeof(struct ixgb_tx_desc));
1570
1571         /* Setup the HW Tx Head and Tail descriptor pointers */
1572         IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1573         IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1574
1575
1576         HW_DEBUGOUT2("Base = %x, Length = %x\n",
1577                      IXGB_READ_REG(&adapter->hw, TDBAL),
1578                      IXGB_READ_REG(&adapter->hw, TDLEN));
1579
1580         IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1581
1582
1583         /* Program the Transmit Control Register */
1584         reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1585         reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1586         IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1587
1588         /* Setup Transmit Descriptor Settings for this adapter */
1589         adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1590
1591         if (adapter->tx_int_delay > 0)
1592                 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1593         return;
1594 }
1595
1596 /*********************************************************************
1597  *
1598  *  Free all transmit related data structures.
1599  *
1600  **********************************************************************/
1601 static void
1602 ixgb_free_transmit_structures(struct adapter * adapter)
1603 {
1604         struct ixgb_buffer *tx_buffer;
1605         int             i;
1606
1607         INIT_DEBUGOUT("free_transmit_structures: begin");
1608
1609         if (adapter->tx_buffer_area != NULL) {
1610                 tx_buffer = adapter->tx_buffer_area;
1611                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1612                         if (tx_buffer->m_head != NULL) {
1613                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1614                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1615                                 m_freem(tx_buffer->m_head);
1616                         }
1617                         tx_buffer->m_head = NULL;
1618                 }
1619         }
1620         if (adapter->tx_buffer_area != NULL) {
1621                 free(adapter->tx_buffer_area, M_DEVBUF);
1622                 adapter->tx_buffer_area = NULL;
1623         }
1624         if (adapter->txtag != NULL) {
1625                 bus_dma_tag_destroy(adapter->txtag);
1626                 adapter->txtag = NULL;
1627         }
1628         return;
1629 }
1630
1631 /*********************************************************************
1632  *
1633  *  The offload context needs to be set when we transmit the first
1634  *  packet of a particular protocol (TCP/UDP). We change the
1635  *  context only if the protocol type changes.
1636  *
1637  **********************************************************************/
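/*
 * Programming a new checksum context consumes one slot in the regular TX
 * descriptor ring (num_tx_desc_avail is decremented below), which is why
 * the existing context is reused while the protocol stays the same.
 */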
1638 static void
1639 ixgb_transmit_checksum_setup(struct adapter * adapter,
1640                              struct mbuf * mp,
1641                              u_int8_t * txd_popts)
1642 {
1643         struct ixgb_context_desc *TXD;
1644         struct ixgb_buffer *tx_buffer;
1645         int             curr_txd;
1646
1647         if (mp->m_pkthdr.csum_flags) {
1648
1649                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1650                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1651                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1652                                 return;
1653                         else
1654                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1655                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1656                         *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1657                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1658                                 return;
1659                         else
1660                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1661                 } else {
1662                         *txd_popts = 0;
1663                         return;
1664                 }
1665         } else {
1666                 *txd_popts = 0;
1667                 return;
1668         }
1669
1670         /*
1671          * If we reach this point, the checksum offload context needs to be
1672          * reset.
1673          */
1674         curr_txd = adapter->next_avail_tx_desc;
1675         tx_buffer = &adapter->tx_buffer_area[curr_txd];
1676         TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
1677
1678
1679         TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
1680         TXD->tucse = 0;
1681
1682         TXD->mss = 0;
1683
1684         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1685                 TXD->tucso =
1686                         ENET_HEADER_SIZE + sizeof(struct ip) +
1687                         offsetof(struct tcphdr, th_sum);
1688         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1689                 TXD->tucso =
1690                         ENET_HEADER_SIZE + sizeof(struct ip) +
1691                         offsetof(struct udphdr, uh_sum);
1692         }
1693         TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
1694
1695         tx_buffer->m_head = NULL;
1696
1697         if (++curr_txd == adapter->num_tx_desc)
1698                 curr_txd = 0;
1699
1700         adapter->num_tx_desc_avail--;
1701         adapter->next_avail_tx_desc = curr_txd;
1702         return;
1703 }
1704
1705 /**********************************************************************
1706  *
1707  *  Examine each tx_buffer in the used queue. If the hardware is done
1708  *  processing the packet then free associated resources. The
1709  *  tx_buffer is put back on the free queue.
1710  *
1711  **********************************************************************/
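/*
 * The scan starts at oldest_used_tx_desc and walks forward while the
 * hardware has set the DD (descriptor done) status bit, unmapping and
 * freeing the mbuf chain attached to each completed slot.
 */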
1712 static void
1713 ixgb_clean_transmit_interrupts(struct adapter * adapter)
1714 {
1715         int             i, num_avail;
1716         struct ixgb_buffer *tx_buffer;
1717         struct ixgb_tx_desc *tx_desc;
1718
1719         IXGB_LOCK_ASSERT(adapter);
1720
1721         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1722                 return;
1723
1724 #ifdef _SV_
1725         adapter->clean_tx_interrupts++;
1726 #endif
1727         num_avail = adapter->num_tx_desc_avail;
1728         i = adapter->oldest_used_tx_desc;
1729
1730         tx_buffer = &adapter->tx_buffer_area[i];
1731         tx_desc = &adapter->tx_desc_base[i];
1732
1733         while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1734
1735                 tx_desc->status = 0;
1736                 num_avail++;
1737
1738                 if (tx_buffer->m_head) {
1739                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1740                                         BUS_DMASYNC_POSTWRITE);
1741                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1742                         bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1743                         m_freem(tx_buffer->m_head);
1744                         tx_buffer->m_head = NULL;
1745                 }
1746                 if (++i == adapter->num_tx_desc)
1747                         i = 0;
1748
1749                 tx_buffer = &adapter->tx_buffer_area[i];
1750                 tx_desc = &adapter->tx_desc_base[i];
1751         }
1752
1753         adapter->oldest_used_tx_desc = i;
1754
1755         /*
1756          * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1757          * it is OK to send packets. If there are no pending descriptors,
1758          * clear the timeout. Otherwise, if some descriptors have been freed,
1759          * restart the timeout.
1760          */
1761         if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1762                 struct ifnet   *ifp = adapter->ifp;
1763
1764                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1765                 if (num_avail == adapter->num_tx_desc)
1766                         adapter->tx_timer = 0;
1767                 else if (num_avail == adapter->num_tx_desc_avail)
1768                         adapter->tx_timer = IXGB_TX_TIMEOUT;
1769         }
1770         adapter->num_tx_desc_avail = num_avail;
1771         return;
1772 }
1773
1774
1775 /*********************************************************************
1776  *
1777  *  Get a buffer from system mbuf buffer pool.
1778  *
1779  **********************************************************************/
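/*
 * Every receive descriptor is backed by a 2K mbuf cluster.  For standard
 * MTU configurations the data pointer is advanced by ETHER_ALIGN (2 bytes
 * on most platforms) so the IP header lands on a 32-bit boundary after
 * the 14-byte Ethernet header.
 */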
1780 static int
1781 ixgb_get_buf(int i, struct adapter * adapter,
1782              struct mbuf * nmp)
1783 {
1784         register struct mbuf *mp = nmp;
1785         struct ixgb_buffer *rx_buffer;
1786         struct ifnet   *ifp;
1787         bus_addr_t      paddr;
1788         int             error;
1789
1790         ifp = adapter->ifp;
1791
1792         if (mp == NULL) {
1793
1794                 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1795
1796                 if (mp == NULL) {
1797                         adapter->mbuf_alloc_failed++;
1798                         return (ENOBUFS);
1799                 }
1800                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1801         } else {
1802                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1803                 mp->m_data = mp->m_ext.ext_buf;
1804                 mp->m_next = NULL;
1805         }
1806
1807         if (ifp->if_mtu <= ETHERMTU) {
1808                 m_adj(mp, ETHER_ALIGN);
1809         }
1810         rx_buffer = &adapter->rx_buffer_area[i];
1811
1812         /*
1813          * Using memory from the mbuf cluster pool, invoke the bus_dma
1814          * machinery to arrange the memory mapping.
1815          */
1816         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1817                                 mtod(mp, void *), mp->m_len,
1818                                 ixgb_dmamap_cb, &paddr, 0);
1819         if (error) {
1820                 m_free(mp);
1821                 return (error);
1822         }
1823         rx_buffer->m_head = mp;
1824         adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1825         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1826
1827         return (0);
1828 }
1829
1830 /*********************************************************************
1831  *
1832  *  Allocate memory for rx_buffer structures. Since we use one
1833  *  rx_buffer per received packet, the maximum number of rx_buffer's
1834  *  that we'll need is equal to the number of receive descriptors
1835  *  that we've allocated.
1836  *
1837  **********************************************************************/
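/*
 * One DMA map is created per receive descriptor; ixgb_get_buf() later
 * loads an mbuf cluster into each map and stores the resulting physical
 * address in the matching descriptor's buff_addr field.
 */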
1838 static int
1839 ixgb_allocate_receive_structures(struct adapter * adapter)
1840 {
1841         int             i, error;
1842         struct ixgb_buffer *rx_buffer;
1843
1844         if (!(adapter->rx_buffer_area =
1845               (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1846                                             adapter->num_rx_desc, M_DEVBUF,
1847                                             M_NOWAIT | M_ZERO))) {
1848                 device_printf(adapter->dev,
1849                     "Unable to allocate rx_buffer memory\n");
1850                 return (ENOMEM);
1851         }
1852         bzero(adapter->rx_buffer_area,
1853               sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1854
1855         error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1856                                    PAGE_SIZE, 0,        /* alignment, bounds */
1857                                    BUS_SPACE_MAXADDR,   /* lowaddr */
1858                                    BUS_SPACE_MAXADDR,   /* highaddr */
1859                                    NULL, NULL,  /* filter, filterarg */
1860                                    MCLBYTES,    /* maxsize */
1861                                    1,   /* nsegments */
1862                                    MCLBYTES,    /* maxsegsize */
1863                                    BUS_DMA_ALLOCNOW,    /* flags */
1864 #if __FreeBSD_version >= 502000
1865                                    NULL,        /* lockfunc */
1866                                    NULL,        /* lockfuncarg */
1867 #endif
1868                                    &adapter->rxtag);
1869         if (error != 0) {
1870                 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1871                        "bus_dma_tag_create failed; error %u\n",
1872                        error);
1873                 goto fail_0;
1874         }
1875         rx_buffer = adapter->rx_buffer_area;
1876         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1877                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1878                                           &rx_buffer->map);
1879                 if (error != 0) {
1880                         device_printf(adapter->dev,
1881                                "ixgb_allocate_receive_structures: "
1882                                "bus_dmamap_create failed; error %u\n",
1883                                error);
1884                         goto fail_1;
1885                 }
1886         }
1887
1888         for (i = 0; i < adapter->num_rx_desc; i++) {
1889                 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1890                         adapter->rx_buffer_area[i].m_head = NULL;
1891                         adapter->rx_desc_base[i].buff_addr = 0;
1892                         return (ENOBUFS);
1893                 }
1894         }
1895
1896         return (0);
1897 fail_1:
1898         bus_dma_tag_destroy(adapter->rxtag);
1899 fail_0:
1900         adapter->rxtag = NULL;
1901         free(adapter->rx_buffer_area, M_DEVBUF);
1902         adapter->rx_buffer_area = NULL;
1903         return (error);
1904 }
1905
1906 /*********************************************************************
1907  *
1908  *  Allocate and initialize receive structures.
1909  *
1910  **********************************************************************/
1911 static int
1912 ixgb_setup_receive_structures(struct adapter * adapter)
1913 {
1914         bzero((void *)adapter->rx_desc_base,
1915               (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1916
1917         if (ixgb_allocate_receive_structures(adapter))
1918                 return ENOMEM;
1919
1920         /* Setup our descriptor pointers */
1921         adapter->next_rx_desc_to_check = 0;
1922         adapter->next_rx_desc_to_use = 0;
1923         return (0);
1924 }
1925
1926 /*********************************************************************
1927  *
1928  *  Enable receive unit.
1929  *
1930  **********************************************************************/
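/*
 * Setup order: receives are disabled while the ring base/length and
 * head/tail registers are programmed, interrupt moderation (RDTR, RXDCTL,
 * RAIDC) and checksum offload are configured, and RCTL is written last
 * with RXEN set to re-enable the unit.
 */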
1931 static void
1932 ixgb_initialize_receive_unit(struct adapter * adapter)
1933 {
1934         u_int32_t       reg_rctl;
1935         u_int32_t       reg_rxcsum;
1936         u_int32_t       reg_rxdctl;
1937         struct ifnet   *ifp;
1938         u_int64_t       rdba = adapter->rxdma.dma_paddr;
1939
1940         ifp = adapter->ifp;
1941
1942         /*
1943          * Make sure receives are disabled while setting up the descriptor
1944          * ring
1945          */
1946         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1947         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1948
1949         /* Set the Receive Delay Timer Register */
1950         IXGB_WRITE_REG(&adapter->hw, RDTR,
1951                        adapter->rx_int_delay);
1952
1953
1954         /* Setup the Base and Length of the Rx Descriptor Ring */
1955         IXGB_WRITE_REG(&adapter->hw, RDBAL,
1956                        (rdba & 0x00000000ffffffffULL));
1957         IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1958         IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1959                        sizeof(struct ixgb_rx_desc));
1960
1961         /* Setup the HW Rx Head and Tail Descriptor Pointers */
1962         IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1963
1964         IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
1965
1966
1967
1968         reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1969                 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1970                 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1971         IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
1972
1973
1974         adapter->raidc = 1;
1975         if (adapter->raidc) {
1976                 uint32_t        raidc;
1977                 uint8_t         poll_threshold;
1978 #define IXGB_RAIDC_POLL_DEFAULT 120
1979
1980                 poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1981                 poll_threshold >>= 1;
1982                 poll_threshold &= 0x3F;
1983                 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1984                         (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1985                         (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1986                         poll_threshold;
1987                 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1988         }
1989         /* Enable Receive Checksum Offload for TCP and UDP if requested */
1990         if (ifp->if_capenable & IFCAP_RXCSUM) {
1991                 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1992                 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1993                 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1994         }
1995         /* Setup the Receive Control Register */
1996         reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1997         reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1998         reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1999                 IXGB_RCTL_CFF |
2000                 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
2001
2002         switch (adapter->rx_buffer_len) {
2003         default:
2004         case IXGB_RXBUFFER_2048:
2005                 reg_rctl |= IXGB_RCTL_BSIZE_2048;
2006                 break;
2007         case IXGB_RXBUFFER_4096:
2008                 reg_rctl |= IXGB_RCTL_BSIZE_4096;
2009                 break;
2010         case IXGB_RXBUFFER_8192:
2011                 reg_rctl |= IXGB_RCTL_BSIZE_8192;
2012                 break;
2013         case IXGB_RXBUFFER_16384:
2014                 reg_rctl |= IXGB_RCTL_BSIZE_16384;
2015                 break;
2016         }
2017
2018         reg_rctl |= IXGB_RCTL_RXEN;
2019
2020
2021         /* Enable Receives */
2022         IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2023
2024         return;
2025 }
2026
2027 /*********************************************************************
2028  *
2029  *  Free receive related data structures.
2030  *
2031  **********************************************************************/
2032 static void
2033 ixgb_free_receive_structures(struct adapter * adapter)
2034 {
2035         struct ixgb_buffer *rx_buffer;
2036         int             i;
2037
2038         INIT_DEBUGOUT("free_receive_structures: begin");
2039
2040         if (adapter->rx_buffer_area != NULL) {
2041                 rx_buffer = adapter->rx_buffer_area;
2042                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2043                         if (rx_buffer->map != NULL) {
2044                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2045                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2046                         }
2047                         if (rx_buffer->m_head != NULL)
2048                                 m_freem(rx_buffer->m_head);
2049                         rx_buffer->m_head = NULL;
2050                 }
2051         }
2052         if (adapter->rx_buffer_area != NULL) {
2053                 free(adapter->rx_buffer_area, M_DEVBUF);
2054                 adapter->rx_buffer_area = NULL;
2055         }
2056         if (adapter->rxtag != NULL) {
2057                 bus_dma_tag_destroy(adapter->rxtag);
2058                 adapter->rxtag = NULL;
2059         }
2060         return;
2061 }
2062
2063 /*********************************************************************
2064  *
2065  *  This routine executes in interrupt context. It replenishes
2066  *  the mbufs in the descriptor ring and passes data that has been
2067  *  DMA'd into host memory up to the upper layers.
2068  *
2069  *  We loop at most count times if count is > 0, or until done if
2070  *  count < 0.
2071  *
2072  *********************************************************************/
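/*
 * Frames may span several descriptors; fragments are chained onto
 * adapter->fmp/adapter->lmp until a descriptor with EOP set completes the
 * packet, which (on FreeBSD 5 and later) is then handed to ifp->if_input()
 * with the adapter lock dropped around the call.
 */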
2073 static int
2074 ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2075 {
2076         struct ifnet   *ifp;
2077         struct mbuf    *mp;
2078 #if __FreeBSD_version < 500000
2079         struct ether_header *eh;
2080 #endif
2081         int             eop = 0;
2082         int             len;
2083         u_int8_t        accept_frame = 0;
2084         int             i;
2085         int             next_to_use = 0;
2086         int             eop_desc;
2087         int             rx_npkts = 0;
2088         /* Pointer to the receive descriptor being examined. */
2089         struct ixgb_rx_desc *current_desc;
2090
2091         IXGB_LOCK_ASSERT(adapter);
2092
2093         ifp = adapter->ifp;
2094         i = adapter->next_rx_desc_to_check;
2095         next_to_use = adapter->next_rx_desc_to_use;
2096         eop_desc = adapter->next_rx_desc_to_check;
2097         current_desc = &adapter->rx_desc_base[i];
2098
2099         if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2100 #ifdef _SV_
2101                 adapter->no_pkts_avail++;
2102 #endif
2103                 return (rx_npkts);
2104         }
2105         while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2106
2107                 mp = adapter->rx_buffer_area[i].m_head;
2108                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2109                                 BUS_DMASYNC_POSTREAD);
2110                 accept_frame = 1;
2111                 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2112                         count--;
2113                         eop = 1;
2114                 } else {
2115                         eop = 0;
2116                 }
2117                 len = current_desc->length;
2118
2119                 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2120                             IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2121                                             IXGB_RX_DESC_ERRORS_RXE)) {
2122                         accept_frame = 0;
2123                 }
2124                 if (accept_frame) {
2125
2126                         /* Assign correct length to the current fragment */
2127                         mp->m_len = len;
2128
2129                         if (adapter->fmp == NULL) {
2130                                 mp->m_pkthdr.len = len;
2131                                 adapter->fmp = mp;      /* Store the first mbuf */
2132                                 adapter->lmp = mp;
2133                         } else {
2134                                 /* Chain mbuf's together */
2135                                 mp->m_flags &= ~M_PKTHDR;
2136                                 adapter->lmp->m_next = mp;
2137                                 adapter->lmp = adapter->lmp->m_next;
2138                                 adapter->fmp->m_pkthdr.len += len;
2139                         }
2140
2141                         if (eop) {
2142                                 eop_desc = i;
2143                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2144
2145 #if __FreeBSD_version < 500000
2146                                 eh = mtod(adapter->fmp, struct ether_header *);
2147
2148                                 /* Remove ethernet header from mbuf */
2149                                 m_adj(adapter->fmp, sizeof(struct ether_header));
2150                                 ixgb_receive_checksum(adapter, current_desc,
2151                                                       adapter->fmp);
2152
2153                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2154                                         VLAN_INPUT_TAG(eh, adapter->fmp,
2155                                                      current_desc->special);
2156                                 else
2157                                         ether_input(ifp, eh, adapter->fmp);
2158 #else
2159                                 ixgb_receive_checksum(adapter, current_desc,
2160                                                       adapter->fmp);
2161 #if __FreeBSD_version < 700000
2162                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2163                                         VLAN_INPUT_TAG(ifp, adapter->fmp,
2164                                                        current_desc->special);
2165 #else
2166                                 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2167                                         adapter->fmp->m_pkthdr.ether_vtag =
2168                                             current_desc->special;
2169                                         adapter->fmp->m_flags |= M_VLANTAG;
2170                                 }
2171 #endif
2172
2173                                 if (adapter->fmp != NULL) {
2174                                         IXGB_UNLOCK(adapter);
2175                                         (*ifp->if_input) (ifp, adapter->fmp);
2176                                         IXGB_LOCK(adapter);
2177                                         rx_npkts++;
2178                                 }
2179 #endif
2180                                 adapter->fmp = NULL;
2181                                 adapter->lmp = NULL;
2182                         }
2183                         adapter->rx_buffer_area[i].m_head = NULL;
2184                 } else {
2185                         adapter->dropped_pkts++;
2186                         if (adapter->fmp != NULL)
2187                                 m_freem(adapter->fmp);
2188                         adapter->fmp = NULL;
2189                         adapter->lmp = NULL;
2190                 }
2191
2192                 /* Zero out the receive descriptor's status */
2193                 current_desc->status = 0;
2194
2195                 /* Advance our pointers to the next descriptor */
2196                 if (++i == adapter->num_rx_desc) {
2197                         i = 0;
2198                         current_desc = adapter->rx_desc_base;
2199                 } else
2200                         current_desc++;
2201         }
2202         adapter->next_rx_desc_to_check = i;
2203
2204         if (--i < 0)
2205                 i = (adapter->num_rx_desc - 1);
2206
2207         /*
2208          * 82597EX: Workaround for a redundant write back in the receive
2209          * descriptor ring (causes memory corruption).  Avoid using and
2210          * re-submitting the most recently received RX descriptor back to hardware.
2211          *
2212          * if (Last written back descriptor == EOP bit set descriptor)
2213          *      then avoid re-submitting the most recently received RX descriptor
2214          *      back to hardware.
2215          * if (Last written back descriptor != EOP bit set descriptor)
2216          *      then avoid re-submitting the most recently received RX descriptors
2217          *      until the last EOP bit set descriptor.
2218          */
2219         if (eop_desc != i) {
2220                 if (++eop_desc == adapter->num_rx_desc)
2221                         eop_desc = 0;
2222                 i = eop_desc;
2223         }
2224         /* Replenish the descriptors with new mbufs up to the last EOP bit set descriptor */
2225         while (next_to_use != i) {
2226                 current_desc = &adapter->rx_desc_base[next_to_use];
2227                 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2228                             IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2229                                              IXGB_RX_DESC_ERRORS_RXE))) {
2230                         mp = adapter->rx_buffer_area[next_to_use].m_head;
2231                         ixgb_get_buf(next_to_use, adapter, mp);
2232                 } else {
2233                         if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2234                                 break;
2235                 }
2236                 /* Advance our pointers to the next descriptor */
2237                 if (++next_to_use == adapter->num_rx_desc) {
2238                         next_to_use = 0;
2239                         current_desc = adapter->rx_desc_base;
2240                 } else
2241                         current_desc++;
2242         }
2243         adapter->next_rx_desc_to_use = next_to_use;
2244         if (--next_to_use < 0)
2245                 next_to_use = (adapter->num_rx_desc - 1);
2246         /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
2247         IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2248
2249         return (rx_npkts);
2250 }
2251
2252 /*********************************************************************
2253  *
2254  *  Verify that the hardware indicated that the checksum is valid.
2255  *  Inform the stack about the checksum status so that the stack
2256  *  doesn't spend time verifying it.
2257  *
2258  *********************************************************************/
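/*
 * On a good TCP/UDP checksum, csum_data is set to 0xffff together with
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR, the conventional way of telling the
 * FreeBSD stack that no further checksum verification is needed.
 */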
2259 static void
2260 ixgb_receive_checksum(struct adapter * adapter,
2261                       struct ixgb_rx_desc * rx_desc,
2262                       struct mbuf * mp)
2263 {
2264         if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2265                 mp->m_pkthdr.csum_flags = 0;
2266                 return;
2267         }
2268         if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2269                 /* Did it pass? */
2270                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2271                         /* IP Checksum Good */
2272                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2273                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2274
2275                 } else {
2276                         mp->m_pkthdr.csum_flags = 0;
2277                 }
2278         }
2279         if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2280                 /* Did it pass? */
2281                 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2282                         mp->m_pkthdr.csum_flags |=
2283                                 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2284                         mp->m_pkthdr.csum_data = htons(0xffff);
2285                 }
2286         }
2287         return;
2288 }
2289
2290
2291 static void
2292 ixgb_enable_vlans(struct adapter * adapter)
2293 {
2294         uint32_t        ctrl;
2295
2296         ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2297         ctrl |= IXGB_CTRL0_VME;
2298         IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2299
2300         return;
2301 }
2302
2303
2304 static void
2305 ixgb_enable_intr(struct adapter * adapter)
2306 {
2307         IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2308                             IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
2309         return;
2310 }
2311
2312 static void
2313 ixgb_disable_intr(struct adapter * adapter)
2314 {
2315         IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
2316         return;
2317 }
2318
2319 void
2320 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2321                    uint32_t reg,
2322                    uint16_t * value)
2323 {
2324         pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2325                          *value, 2);
2326 }
2327
2328 /**********************************************************************
2329  *
2330  *  Update the board statistics counters.
2331  *
2332  **********************************************************************/
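/*
 * Each hardware counter read is accumulated into the software copies in
 * adapter->stats; the low-half totals (gprcl, gptcl, gorcl, ...) are then
 * exported through the ifnet statistics fields at the end of the routine.
 */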
2333 static void
2334 ixgb_update_stats_counters(struct adapter * adapter)
2335 {
2336         struct ifnet   *ifp;
2337
2338         adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2339         adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2340         adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2341         adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2342         adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2343         adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2344         adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2345         adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2346         adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2347         adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2348
2349         adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2350         adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2351         adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2352         adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2353         adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2354         adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2355         adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2356         adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2357         adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2358         adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2359         adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2360         adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2361         adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2362         adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2363         adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2364         adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2365         adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2366         adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2367         adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2368         adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2369         adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2370         adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2371         adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2372         adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2373         adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2374         adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2375         adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2376
2377         adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2378         adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2379         adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2380         adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2381         adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2382         adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2383         adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2384         adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2385         adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2386         adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2387         adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2388         adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2389         adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2390         adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2391         adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2392         adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2393         adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2394         adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2395         adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2396         adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2397         adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2398         adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2399
2400         ifp = adapter->ifp;
2401
2402         /* Fill out the OS statistics structure */
2403         ifp->if_ipackets = adapter->stats.gprcl;
2404         ifp->if_opackets = adapter->stats.gptcl;
2405         ifp->if_ibytes = adapter->stats.gorcl;
2406         ifp->if_obytes = adapter->stats.gotcl;
2407         ifp->if_imcasts = adapter->stats.mprcl;
2408         ifp->if_collisions = 0;
2409
2410         /* Rx Errors */
2411         ifp->if_ierrors =
2412                 adapter->dropped_pkts +
2413                 adapter->stats.crcerrs +
2414                 adapter->stats.rnbc +
2415                 adapter->stats.mpc +
2416                 adapter->stats.rlec;
2417
2418
2419 }
2420
2421
2422 /**********************************************************************
2423  *
2424  *  This routine is called only when ixgb_display_debug_stats is enabled.
2425  *  This routine provides a way to examine important statistics
2426  *  maintained by the driver and hardware.
2427  *
2428  **********************************************************************/
2429 static void
2430 ixgb_print_hw_stats(struct adapter * adapter)
2431 {
2432         char            buf_speed[100], buf_type[100];
2433         ixgb_bus_speed  bus_speed;
2434         ixgb_bus_type   bus_type;
2435         device_t dev;
2436
2437         dev = adapter->dev;
2438 #ifdef _SV_
2439         device_printf(dev, "Packets not Avail = %ld\n",
2440                adapter->no_pkts_avail);
2441         device_printf(dev, "CleanTxInterrupts = %ld\n",
2442                adapter->clean_tx_interrupts);
2443         device_printf(dev, "ICR RXDMT0 = %lld\n",
2444                (long long)adapter->sv_stats.icr_rxdmt0);
2445         device_printf(dev, "ICR RXO = %lld\n",
2446                (long long)adapter->sv_stats.icr_rxo);
2447         device_printf(dev, "ICR RXT0 = %lld\n",
2448                (long long)adapter->sv_stats.icr_rxt0);
2449         device_printf(dev, "ICR TXDW = %lld\n",
2450                (long long)adapter->sv_stats.icr_TXDW);
2451 #endif                          /* _SV_ */
2452
2453         bus_speed = adapter->hw.bus.speed;
2454         bus_type = adapter->hw.bus.type;
2455         sprintf(buf_speed,
2456                 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2457                 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2458                 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2459                 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2460                 "UNKNOWN");
2461         device_printf(dev, "PCI_Bus_Speed = %s\n",
2462                buf_speed);
2463
2464         sprintf(buf_type,
2465                 bus_type == ixgb_bus_type_pci ? "PCI" :
2466                 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2467                 "UNKNOWN");
2468         device_printf(dev, "PCI_Bus_Type = %s\n",
2469                buf_type);
2470
2471         device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2472                adapter->no_tx_desc_avail1);
2473         device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2474                adapter->no_tx_desc_avail2);
2475         device_printf(dev, "Std Mbuf Failed = %ld\n",
2476                adapter->mbuf_alloc_failed);
2477         device_printf(dev, "Std Cluster Failed = %ld\n",
2478                adapter->mbuf_cluster_failed);
2479
2480         device_printf(dev, "Defer count = %lld\n",
2481                (long long)adapter->stats.dc);
2482         device_printf(dev, "Missed Packets = %lld\n",
2483                (long long)adapter->stats.mpc);
2484         device_printf(dev, "Receive No Buffers = %lld\n",
2485                (long long)adapter->stats.rnbc);
2486         device_printf(dev, "Receive length errors = %lld\n",
2487                (long long)adapter->stats.rlec);
2488         device_printf(dev, "Crc errors = %lld\n",
2489                (long long)adapter->stats.crcerrs);
2490         device_printf(dev, "Driver dropped packets = %ld\n",
2491                adapter->dropped_pkts);
2492
2493         device_printf(dev, "XON Rcvd = %lld\n",
2494                (long long)adapter->stats.xonrxc);
2495         device_printf(dev, "XON Xmtd = %lld\n",
2496                (long long)adapter->stats.xontxc);
2497         device_printf(dev, "XOFF Rcvd = %lld\n",
2498                (long long)adapter->stats.xoffrxc);
2499         device_printf(dev, "XOFF Xmtd = %lld\n",
2500                (long long)adapter->stats.xofftxc);
2501
2502         device_printf(dev, "Good Packets Rcvd = %lld\n",
2503                (long long)adapter->stats.gprcl);
2504         device_printf(dev, "Good Packets Xmtd = %lld\n",
2505                (long long)adapter->stats.gptcl);
2506
2507         device_printf(dev, "Jumbo frames recvd = %lld\n",
2508                (long long)adapter->stats.jprcl);
2509         device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2510                (long long)adapter->stats.jptcl);
2511
2512         return;
2513
2514 }
2515
2516 static int
2517 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2518 {
2519         int             error;
2520         int             result;
2521         struct adapter *adapter;
2522
2523         result = -1;
2524         error = sysctl_handle_int(oidp, &result, 0, req);
2525
2526         if (error || !req->newptr)
2527                 return (error);
2528
2529         if (result == 1) {
2530                 adapter = (struct adapter *) arg1;
2531                 ixgb_print_hw_stats(adapter);
2532         }
2533         return error;
2534 }