1 /*******************************************************************************
3 Copyright (c) 2001-2004, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
40 #include <dev/ixgb/if_ixgb.h>
/*
 * Module-scope driver state.
 * NOTE(review): this source is a line-numbered paste with lines missing;
 * code below is left byte-identical to the original view.
 */
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
45 int ixgb_display_debug_stats = 0;
47 /*********************************************************************
48 * Linked list of board private structures for all NICs found
49 *********************************************************************/
51 struct adapter *ixgb_adapter_list = NULL;
55 /*********************************************************************
57 *********************************************************************/
/* Version string appended to the probe description; copyright printed at attach. */
59 char ixgb_driver_version[] = "1.0.6";
60 char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
62 /*********************************************************************
65 * Used by probe to select devices to load on
66 * Last field stores an index into ixgb_strings
67 * Last entry must be all 0s
69 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70 *********************************************************************/
/*
 * PCI ID match table walked by ixgb_probe().
 * NOTE(review): the opening brace and the all-zero terminator entry are not
 * visible in this view — confirm against the full source.
 */
72 static ixgb_vendor_info_t ixgb_vendor_info_array[] =
74 /* Intel(R) PRO/10000 Network Connection */
75 {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76 {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77 /* required last entry */
81 /*********************************************************************
82 * Table of branding strings for all supported NICs.
83 *********************************************************************/
/* Indexed by the last field of ixgb_vendor_info_array entries. */
85 static char *ixgb_strings[] = {
86 "Intel(R) PRO/10GbE Network Driver"
89 /*********************************************************************
91 *********************************************************************/
/*
 * Forward declarations for all file-local (static) functions.
 * NOTE(review): several prototypes below are truncated in this view —
 * their return-type lines and/or trailing parameters are missing.
 */
92 static int ixgb_probe(device_t);
93 static int ixgb_attach(device_t);
94 static int ixgb_detach(device_t);
95 static int ixgb_shutdown(device_t);
96 static void ixgb_intr(void *);
97 static void ixgb_start(struct ifnet *);
98 static void ixgb_start_locked(struct ifnet *);
99 static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static void ixgb_watchdog(struct adapter *);
101 static void ixgb_init(void *);
102 static void ixgb_init_locked(struct adapter *);
103 static void ixgb_stop(void *);
104 static void ixgb_media_status(struct ifnet *, struct ifmediareq *);
105 static int ixgb_media_change(struct ifnet *);
106 static void ixgb_identify_hardware(struct adapter *);
107 static int ixgb_allocate_pci_resources(struct adapter *);
108 static void ixgb_free_pci_resources(struct adapter *);
109 static void ixgb_local_timer(void *);
110 static int ixgb_hardware_init(struct adapter *);
111 static int ixgb_setup_interface(device_t, struct adapter *);
112 static int ixgb_setup_transmit_structures(struct adapter *);
113 static void ixgb_initialize_transmit_unit(struct adapter *);
114 static int ixgb_setup_receive_structures(struct adapter *);
115 static void ixgb_initialize_receive_unit(struct adapter *);
116 static void ixgb_enable_intr(struct adapter *);
117 static void ixgb_disable_intr(struct adapter *);
118 static void ixgb_free_transmit_structures(struct adapter *);
119 static void ixgb_free_receive_structures(struct adapter *);
120 static void ixgb_update_stats_counters(struct adapter *);
121 static void ixgb_clean_transmit_interrupts(struct adapter *);
122 static int ixgb_allocate_receive_structures(struct adapter *);
123 static int ixgb_allocate_transmit_structures(struct adapter *);
124 static int ixgb_process_receive_interrupts(struct adapter *, int);
126 ixgb_receive_checksum(struct adapter *,
127 struct ixgb_rx_desc * rx_desc,
130 ixgb_transmit_checksum_setup(struct adapter *,
133 static void ixgb_set_promisc(struct adapter *);
134 static void ixgb_disable_promisc(struct adapter *);
135 static void ixgb_set_multi(struct adapter *);
136 static void ixgb_print_hw_stats(struct adapter *);
137 static void ixgb_print_link_status(struct adapter *);
139 ixgb_get_buf(int i, struct adapter *,
141 static void ixgb_enable_vlans(struct adapter * adapter);
142 static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143 static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
145 ixgb_dma_malloc(struct adapter *, bus_size_t,
146 struct ixgb_dma_alloc *, int);
147 static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148 #ifdef DEVICE_POLLING
149 static poll_handler_t ixgb_poll;
152 /*********************************************************************
153 * FreeBSD Device Interface Entry Points
154 *********************************************************************/
/* newbus method table; the DEVMETHOD_END terminator is not visible here. */
156 static device_method_t ixgb_methods[] = {
157 /* Device interface */
158 DEVMETHOD(device_probe, ixgb_probe),
159 DEVMETHOD(device_attach, ixgb_attach),
160 DEVMETHOD(device_detach, ixgb_detach),
161 DEVMETHOD(device_shutdown, ixgb_shutdown),
166 static driver_t ixgb_driver = {
167 "ixgb", ixgb_methods, sizeof(struct adapter),
170 static devclass_t ixgb_devclass;
/* Register the driver on the PCI bus and declare module dependencies. */
171 DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
173 MODULE_DEPEND(ixgb, pci, 1, 1, 1);
174 MODULE_DEPEND(ixgb, ether, 1, 1, 1);
176 /* some defines for controlling descriptor fetches in h/w */
177 #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
178 #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
179 * pushed this many descriptors from
* head */
181 #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */
184 /*********************************************************************
185 * Device identification routine
187 * ixgb_probe determines if the driver should be loaded on
188 * adapter based on PCI vendor/device id of the adapter.
190 * return 0 on success, positive on failure
191 *********************************************************************/
/*
 * NOTE(review): the "static int" line, braces, and the final
 * "return (ENXIO);" no-match path are missing from this view.
 */
194 ixgb_probe(device_t dev)
196 ixgb_vendor_info_t *ent;
198 u_int16_t pci_vendor_id = 0;
199 u_int16_t pci_device_id = 0;
200 u_int16_t pci_subvendor_id = 0;
201 u_int16_t pci_subdevice_id = 0;
202 char adapter_name[60];
204 INIT_DEBUGOUT("ixgb_probe: begin");
/* Cheap early reject: only Intel devices can match the table. */
206 pci_vendor_id = pci_get_vendor(dev);
207 if (pci_vendor_id != IXGB_VENDOR_ID)
210 pci_device_id = pci_get_device(dev);
211 pci_subvendor_id = pci_get_subvendor(dev);
212 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; PCI_ANY_ID wildcards the subsystem IDs. */
214 ent = ixgb_vendor_info_array;
215 while (ent->vendor_id != 0) {
216 if ((pci_vendor_id == ent->vendor_id) &&
217 (pci_device_id == ent->device_id) &&
219 ((pci_subvendor_id == ent->subvendor_id) ||
220 (ent->subvendor_id == PCI_ANY_ID)) &&
222 ((pci_subdevice_id == ent->subdevice_id) ||
223 (ent->subdevice_id == PCI_ANY_ID))) {
224 sprintf(adapter_name, "%s, Version - %s",
225 ixgb_strings[ent->index],
226 ixgb_driver_version);
227 device_set_desc_copy(dev, adapter_name);
228 return (BUS_PROBE_DEFAULT);
236 /*********************************************************************
237 * Device initialization routine
239 * The attach entry point is called when the driver is being loaded.
240 * This routine identifies the type of hardware, allocates all resources
241 * and initializes the hardware.
243 * return 0 on success, positive on failure
244 *********************************************************************/
/*
 * NOTE(review): local declarations (tsize/rsize/error), the success
 * "return (0);", and the error-unwind labels the failure branches jump
 * to are missing from this view; the trailing block below (dma_free /
 * if_free / free) is that unwind path.
 */
247 ixgb_attach(device_t dev)
249 struct adapter *adapter;
253 device_printf(dev, "%s\n", ixgb_copyright);
254 INIT_DEBUGOUT("ixgb_attach: begin");
256 /* Allocate, clear, and link in our adapter structure */
257 if (!(adapter = device_get_softc(dev))) {
258 device_printf(dev, "adapter structure allocation failed\n");
261 bzero(adapter, sizeof(struct adapter));
263 adapter->osdep.dev = dev;
264 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Push onto the head of the global doubly-linked adapter list. */
266 if (ixgb_adapter_list != NULL)
267 ixgb_adapter_list->prev = adapter;
268 adapter->next = ixgb_adapter_list;
269 ixgb_adapter_list = adapter;
/* Per-device sysctl node exposing the stats handler. */
272 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
273 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
274 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
276 ixgb_sysctl_stats, "I", "Statistics");
278 callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
280 /* Determine hardware revision */
281 ixgb_identify_hardware(adapter);
283 /* Parameters (to be read from user) */
284 adapter->num_tx_desc = IXGB_MAX_TXD;
285 adapter->num_rx_desc = IXGB_MAX_RXD;
286 adapter->tx_int_delay = TIDV;
287 adapter->rx_int_delay = RDTR;
288 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
/* Flow-control watermarks and pause parameters. */
290 adapter->hw.fc.high_water = FCRTH;
291 adapter->hw.fc.low_water = FCRTL;
292 adapter->hw.fc.pause_time = FCPAUSE;
293 adapter->hw.fc.send_xon = TRUE;
294 adapter->hw.fc.type = FLOW_CONTROL;
297 /* Set the max frame size assuming standard ethernet sized frames */
298 adapter->hw.max_frame_size =
299 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
301 if (ixgb_allocate_pci_resources(adapter)) {
302 device_printf(dev, "Allocation of PCI resources failed\n");
/* Descriptor rings must be 4K-aligned multiples for the hardware. */
306 tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
307 sizeof(struct ixgb_tx_desc), 4096);
309 /* Allocate Transmit Descriptor ring */
310 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
311 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
315 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
317 rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
318 sizeof(struct ixgb_rx_desc), 4096);
320 /* Allocate Receive Descriptor ring */
321 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
322 device_printf(dev, "Unable to allocate rx_desc memory\n");
326 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
328 /* Allocate multicast array memory. */
329 adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
330 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
331 if (adapter->mta == NULL) {
332 device_printf(dev, "Can not allocate multicast setup array\n");
337 /* Initialize the hardware */
338 if (ixgb_hardware_init(adapter)) {
339 device_printf(dev, "Unable to initialize the hardware\n");
343 /* Setup OS specific network interface */
344 if (ixgb_setup_interface(dev, adapter) != 0)
347 /* Initialize statistics */
348 ixgb_clear_hw_cntrs(&adapter->hw);
349 ixgb_update_stats_counters(adapter);
351 INIT_DEBUGOUT("ixgb_attach: end");
/* --- error unwind: release resources in reverse order of acquisition --- */
355 ixgb_dma_free(adapter, &adapter->rxdma);
357 ixgb_dma_free(adapter, &adapter->txdma);
360 if (adapter->ifp != NULL)
361 if_free(adapter->ifp);
362 ixgb_free_pci_resources(adapter);
363 sysctl_ctx_free(&adapter->sysctl_ctx);
364 free(adapter->mta, M_DEVBUF);
369 /*********************************************************************
370 * Device removal routine
372 * The detach entry point is called when the driver is being removed.
373 * This routine stops the adapter and deallocates all the resources
374 * that were allocated for driver operation.
376 * return 0 on success, positive on failure
377 *********************************************************************/
/*
 * NOTE(review): the "static int" line, IXGB_LOCK/ixgb_stop call before
 * the visible IXGB_UNLOCK, the #else/#endif partners of the version
 * conditionals, and the final "return (0);" are missing from this view.
 */
380 ixgb_detach(device_t dev)
382 struct adapter *adapter = device_get_softc(dev);
383 struct ifnet *ifp = adapter->ifp;
385 INIT_DEBUGOUT("ixgb_detach: begin");
387 #ifdef DEVICE_POLLING
388 if (ifp->if_capenable & IFCAP_POLLING)
389 ether_poll_deregister(ifp);
/* Flag detach so racing ioctls bail out early. */
393 adapter->in_detach = 1;
396 IXGB_UNLOCK(adapter);
398 #if __FreeBSD_version < 500000
399 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
/* Drain the timer outside the lock before tearing down resources. */
403 callout_drain(&adapter->timer);
404 ixgb_free_pci_resources(adapter);
405 #if __FreeBSD_version >= 500000
409 /* Free Transmit Descriptor ring */
410 if (adapter->tx_desc_base) {
411 ixgb_dma_free(adapter, &adapter->txdma);
412 adapter->tx_desc_base = NULL;
414 /* Free Receive Descriptor ring */
415 if (adapter->rx_desc_base) {
416 ixgb_dma_free(adapter, &adapter->rxdma);
417 adapter->rx_desc_base = NULL;
419 /* Remove from the adapter list */
420 if (ixgb_adapter_list == adapter)
421 ixgb_adapter_list = adapter->next;
422 if (adapter->next != NULL)
423 adapter->next->prev = adapter->prev;
424 if (adapter->prev != NULL)
425 adapter->prev->next = adapter->next;
426 free(adapter->mta, M_DEVBUF);
428 IXGB_LOCK_DESTROY(adapter);
432 /*********************************************************************
434 * Shutdown entry point
436 **********************************************************************/
439 ixgb_shutdown(device_t dev)
441 struct adapter *adapter = device_get_softc(dev);
444 IXGB_UNLOCK(adapter);
449 /*********************************************************************
450 * Transmit entry point
452 * ixgb_start is called by the stack to initiate a transmit.
453 * The driver will remain in this routine as long as there are
454 * packets to transmit and transmit resources are available.
455 * In case resources are not available stack is notified and
456 * the packet is requeued.
457 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, m_head declaration,
 * the NULL-dequeue check, and the break after requeue are missing
 * from this view.
 */
460 ixgb_start_locked(struct ifnet * ifp)
463 struct adapter *adapter = ifp->if_softc;
465 IXGB_LOCK_ASSERT(adapter);
/* No point queueing descriptors while the link is down. */
467 if (!adapter->link_active)
470 while (ifp->if_snd.ifq_head != NULL) {
471 IF_DEQUEUE(&ifp->if_snd, m_head);
/* Out of descriptors: mark OACTIVE and put the packet back. */
476 if (ixgb_encap(adapter, m_head)) {
477 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
478 IF_PREPEND(&ifp->if_snd, m_head);
481 /* Send a copy of the frame to the BPF listener */
482 #if __FreeBSD_version < 500000
484 bpf_mtap(ifp, m_head);
486 ETHER_BPF_MTAP(ifp, m_head);
488 /* Set timeout in case hardware has problems transmitting */
489 adapter->tx_timer = IXGB_TX_TIMEOUT;
496 ixgb_start(struct ifnet *ifp)
498 struct adapter *adapter = ifp->if_softc;
501 ixgb_start_locked(ifp);
502 IXGB_UNLOCK(adapter);
506 /*********************************************************************
509 * ixgb_ioctl is called when the user wants to configure the
512 * return 0 on success, positive on failure
513 **********************************************************************/
/*
 * NOTE(review): the "static int" line, local declarations (error/mask),
 * the switch statement itself, the "case SIOCxxx:" labels, and the
 * break/return scaffolding are all missing from this view — only the
 * per-case bodies survive.
 */
516 ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
519 struct ifreq *ifr = (struct ifreq *) data;
520 struct adapter *adapter = ifp->if_softc;
/* Refuse configuration while detach is tearing the device down. */
522 if (adapter->in_detach)
528 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
529 ether_ioctl(ifp, command, data);
532 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
533 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
537 ifp->if_mtu = ifr->ifr_mtu;
538 adapter->hw.max_frame_size =
539 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
/* Re-init so the new max frame size reaches the hardware. */
541 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
542 ixgb_init_locked(adapter);
543 IXGB_UNLOCK(adapter);
547 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
549 if (ifp->if_flags & IFF_UP) {
550 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
551 ixgb_init_locked(adapter);
/* Already running: just resync promiscuous state with if_flags. */
553 ixgb_disable_promisc(adapter);
554 ixgb_set_promisc(adapter);
556 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
560 IXGB_UNLOCK(adapter);
564 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
565 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Reprogram the multicast filter with interrupts masked. */
567 ixgb_disable_intr(adapter);
568 ixgb_set_multi(adapter);
569 ixgb_enable_intr(adapter);
570 IXGB_UNLOCK(adapter);
575 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
576 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
579 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
580 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
581 #ifdef DEVICE_POLLING
582 if (mask & IFCAP_POLLING) {
583 if (ifr->ifr_reqcap & IFCAP_POLLING) {
584 error = ether_poll_register(ixgb_poll, ifp);
/* Polling on: device interrupts are disabled while polling. */
588 ixgb_disable_intr(adapter);
589 ifp->if_capenable |= IFCAP_POLLING;
590 IXGB_UNLOCK(adapter);
592 error = ether_poll_deregister(ifp);
593 /* Enable interrupt even in error case */
595 ixgb_enable_intr(adapter);
596 ifp->if_capenable &= ~IFCAP_POLLING;
597 IXGB_UNLOCK(adapter);
600 #endif /* DEVICE_POLLING */
/* Toggle hardware checksum offload capability. */
601 if (mask & IFCAP_HWCSUM) {
602 if (IFCAP_HWCSUM & ifp->if_capenable)
603 ifp->if_capenable &= ~IFCAP_HWCSUM;
605 ifp->if_capenable |= IFCAP_HWCSUM;
606 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
611 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
619 /*********************************************************************
620 * Watchdog entry point
622 * This routine is called whenever hardware quits transmitting.
624 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, the ifp local, the
 * early return after re-arming the timer, and the if_oerrors bump are
 * missing from this view.
 */
627 ixgb_watchdog(struct adapter *adapter)
634 * If we are in this routine because of pause frames, then don't
635 * reset the hardware.
/* TXOFF set means we are flow-controlled, not hung: just re-arm. */
637 if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
638 adapter->tx_timer = IXGB_TX_TIMEOUT;
641 if_printf(ifp, "watchdog timeout -- resetting\n");
644 ixgb_init_locked(adapter);
652 /*********************************************************************
655 * This routine is used in two ways. It is used by the stack as
656 * init entry point in network interface structure. It is also used
657 * by the driver as a hw/sw initialization routine to get to a
660 * return 0 on success, positive on failure
661 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, the ifp/temp_reg
 * locals, the ixgb_stop() call before re-init, and the early returns
 * on setup failure are missing from this view.
 */
664 ixgb_init_locked(struct adapter *adapter)
668 INIT_DEBUGOUT("ixgb_init: begin");
670 IXGB_LOCK_ASSERT(adapter);
675 /* Get the latest mac address, User can use a LAA */
676 bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
677 IXGB_ETH_LENGTH_OF_ADDRESS);
679 /* Initialize the hardware */
680 if (ixgb_hardware_init(adapter)) {
681 if_printf(ifp, "Unable to initialize the hardware\n");
684 ixgb_enable_vlans(adapter);
686 /* Prepare transmit descriptors and buffers */
687 if (ixgb_setup_transmit_structures(adapter)) {
688 if_printf(ifp, "Could not setup transmit structures\n");
692 ixgb_initialize_transmit_unit(adapter);
694 /* Setup Multicast table */
695 ixgb_set_multi(adapter);
697 /* Prepare receive descriptors and buffers */
698 if (ixgb_setup_receive_structures(adapter)) {
699 if_printf(ifp, "Could not setup receive structures\n");
703 ixgb_initialize_receive_unit(adapter);
705 /* Don't lose promiscuous settings */
706 ixgb_set_promisc(adapter);
709 ifp->if_drv_flags |= IFF_DRV_RUNNING;
710 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Mirror the TXCSUM capability into if_hwassist. */
713 if (ifp->if_capenable & IFCAP_TXCSUM)
714 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
716 ifp->if_hwassist = 0;
719 /* Enable jumbo frames */
720 if (ifp->if_mtu > ETHERMTU) {
722 IXGB_WRITE_REG(&adapter->hw, MFS,
723 adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
724 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
725 temp_reg |= IXGB_CTRL0_JFE;
726 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
/* Kick off the 1 Hz housekeeping timer and reset hardware counters. */
728 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
729 ixgb_clear_hw_cntrs(&adapter->hw);
730 #ifdef DEVICE_POLLING
732 * Only disable interrupts if we are polling, make sure they are on
735 if (ifp->if_capenable & IFCAP_POLLING)
736 ixgb_disable_intr(adapter);
739 ixgb_enable_intr(adapter);
/*
 * Body of ixgb_init(void *arg) — the if_init entry point.
 * NOTE(review): the function header and the IXGB_LOCK() call that pairs
 * with the UNLOCK below are missing from this view.
 */
747 struct adapter *adapter = arg;
750 ixgb_init_locked(adapter);
751 IXGB_UNLOCK(adapter);
755 #ifdef DEVICE_POLLING
/*
 * Locked polling worker: optionally checks link status, then cleans
 * RX/TX and restarts transmit.  NOTE(review): the "static int" lines,
 * braces, reg_icr/rx_npkts declarations, and the returns of rx_npkts
 * are missing from this view.
 */
757 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
759 struct adapter *adapter = ifp->if_softc;
763 IXGB_LOCK_ASSERT(adapter);
765 if (cmd == POLL_AND_CHECK_STATUS) {
766 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
767 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
768 ixgb_check_for_link(&adapter->hw);
769 ixgb_print_link_status(adapter);
772 rx_npkts = ixgb_process_receive_interrupts(adapter, count);
773 ixgb_clean_transmit_interrupts(adapter);
/* TX descriptors may have freed up — try to drain the send queue. */
775 if (ifp->if_snd.ifq_head != NULL)
776 ixgb_start_locked(ifp);
/* Poll handler registered via ether_poll_register(); wraps the
 * locked worker with the adapter lock. */
781 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
783 struct adapter *adapter = ifp->if_softc;
787 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
788 rx_npkts = ixgb_poll_locked(ifp, cmd, count)
789 IXGB_UNLOCK(adapter);
792 #endif /* DEVICE_POLLING */
794 /*********************************************************************
796 * Interrupt Service routine
798 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, the ifp/reg_icr locals,
 * the IXGB_LOCK() call, the zero-ICR early-return test, and the
 * loop_cnt decrement are missing from this view.
 */
803 u_int32_t loop_cnt = IXGB_MAX_INTR;
806 struct adapter *adapter = arg;
807 boolean_t rxdmt0 = FALSE;
813 #ifdef DEVICE_POLLING
/* Polling owns the device — ignore stray interrupts. */
814 if (ifp->if_capenable & IFCAP_POLLING) {
815 IXGB_UNLOCK(adapter);
/* Reading ICR acknowledges and clears the pending causes. */
820 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
822 IXGB_UNLOCK(adapter);
826 if (reg_icr & IXGB_INT_RXDMT0)
/* Per-cause software counters (debug statistics). */
830 if (reg_icr & IXGB_INT_RXDMT0)
831 adapter->sv_stats.icr_rxdmt0++;
832 if (reg_icr & IXGB_INT_RXO)
833 adapter->sv_stats.icr_rxo++;
834 if (reg_icr & IXGB_INT_RXT0)
835 adapter->sv_stats.icr_rxt0++;
836 if (reg_icr & IXGB_INT_TXDW)
837 adapter->sv_stats.icr_TXDW++;
840 /* Link status change */
841 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
842 ixgb_check_for_link(&adapter->hw);
843 ixgb_print_link_status(adapter);
/* Bounded RX/TX cleanup loop to avoid livelock in the ISR. */
845 while (loop_cnt > 0) {
846 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
847 ixgb_process_receive_interrupts(adapter, -1);
848 ixgb_clean_transmit_interrupts(adapter);
/* Re-arm the RX descriptor-minimum-threshold interrupt. */
853 if (rxdmt0 && adapter->raidc) {
854 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
855 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
857 if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
858 ixgb_start_locked(ifp);
860 IXGB_UNLOCK(adapter);
865 /*********************************************************************
867 * Media Ioctl callback
869 * This routine is called whenever the user queries the status of
870 * the interface using ifconfig.
872 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, locking, and the
 * early return taken when the link is down are missing from this view.
 */
874 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
876 struct adapter *adapter = ifp->if_softc;
878 INIT_DEBUGOUT("ixgb_media_status: begin");
880 ixgb_check_for_link(&adapter->hw);
881 ixgb_print_link_status(adapter);
883 ifmr->ifm_status = IFM_AVALID;
884 ifmr->ifm_active = IFM_ETHER;
886 if (!adapter->hw.link_up)
/* Link is up: report active full-duplex fiber media. */
889 ifmr->ifm_status |= IFM_ACTIVE;
890 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
895 /*********************************************************************
897 * Media Ioctl callback
899 * This routine is called when the user changes speed/duplex using
900 * media/mediopt option with ifconfig.
902 **********************************************************************/
/*
 * NOTE(review): the "static int" line, braces, the EINVAL return for
 * non-Ethernet media, and the final "return (0);" are missing from
 * this view.
 */
904 ixgb_media_change(struct ifnet * ifp)
906 struct adapter *adapter = ifp->if_softc;
907 struct ifmedia *ifm = &adapter->media;
909 INIT_DEBUGOUT("ixgb_media_change: begin");
911 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
917 /*********************************************************************
919 * This routine maps the mbufs to tx descriptors.
921 * return 0 on success, positive on failure
922 **********************************************************************/
/*
 * NOTE(review): the "static int" line, braces, several local
 * declarations (map, txd_popts, mtag), error returns (ENOBUFS), and
 * the #else/#endif partners of the version conditionals are missing
 * from this view.
 * NOTE(review): "#elseif" at the version conditionals below is not a
 * valid C preprocessor directive — the standard spelling is "#elif".
 * Confirm against the canonical source; as written this would break
 * the conditional grouping on compilers that diagnose it.
 */
925 ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
928 int i, j, error, nsegs;
930 #if __FreeBSD_version < 500000
931 struct ifvlan *ifv = NULL;
933 bus_dma_segment_t segs[IXGB_MAX_SCATTER];
935 struct ixgb_buffer *tx_buffer = NULL;
936 struct ixgb_tx_desc *current_tx_desc = NULL;
937 struct ifnet *ifp = adapter->ifp;
940 * Force a cleanup if number of TX descriptors available hits the
943 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
944 ixgb_clean_transmit_interrupts(adapter);
/* Still below threshold after cleanup: report no-descriptor failure. */
946 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
947 adapter->no_tx_desc_avail1++;
951 * Map the packet for DMA.
953 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
954 adapter->no_tx_map_avail++;
957 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
958 &nsegs, BUS_DMA_NOWAIT);
/* DMA load failed: count it, log, and release the map. */
960 adapter->no_tx_dma_setup++;
961 if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
962 "error %u\n", error);
963 bus_dmamap_destroy(adapter->txtag, map);
966 KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
968 if (nsegs > adapter->num_tx_desc_avail) {
969 adapter->no_tx_desc_avail2++;
970 bus_dmamap_destroy(adapter->txtag, map);
973 if (ifp->if_hwassist > 0) {
974 ixgb_transmit_checksum_setup(adapter, m_head,
979 /* Find out if we are in vlan mode */
980 #if __FreeBSD_version < 500000
981 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
982 m_head->m_pkthdr.rcvif != NULL &&
983 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
984 ifv = m_head->m_pkthdr.rcvif->if_softc;
985 #elseif __FreeBSD_version < 700000
986 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
/* Fill one TX descriptor per DMA segment, wrapping at ring end. */
988 i = adapter->next_avail_tx_desc;
989 for (j = 0; j < nsegs; j++) {
990 tx_buffer = &adapter->tx_buffer_area[i];
991 current_tx_desc = &adapter->tx_desc_base[i];
993 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
994 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
995 current_tx_desc->popts = txd_popts;
996 if (++i == adapter->num_tx_desc)
999 tx_buffer->m_head = NULL;
1002 adapter->num_tx_desc_avail -= nsegs;
1003 adapter->next_avail_tx_desc = i;
1005 #if __FreeBSD_version < 500000
1007 /* Set the vlan id */
1008 current_tx_desc->vlan = ifv->ifv_tag;
1009 #elseif __FreeBSD_version < 700000
1011 /* Set the vlan id */
1012 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1014 if (m_head->m_flags & M_VLANTAG) {
1015 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1018 /* Tell hardware to add tag */
1019 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
/* Last descriptor owns the mbuf and the DMA map for later cleanup. */
1021 tx_buffer->m_head = m_head;
1022 tx_buffer->map = map;
1023 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1026 * Last Descriptor of Packet needs End Of Packet (EOP)
1028 current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1031 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1032 * that this frame is available to transmit.
1034 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1040 ixgb_set_promisc(struct adapter * adapter)
1044 struct ifnet *ifp = adapter->ifp;
1046 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1048 if (ifp->if_flags & IFF_PROMISC) {
1049 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1050 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1051 } else if (ifp->if_flags & IFF_ALLMULTI) {
1052 reg_rctl |= IXGB_RCTL_MPE;
1053 reg_rctl &= ~IXGB_RCTL_UPE;
1054 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1060 ixgb_disable_promisc(struct adapter * adapter)
1064 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1066 reg_rctl &= (~IXGB_RCTL_UPE);
1067 reg_rctl &= (~IXGB_RCTL_MPE);
1068 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1074 /*********************************************************************
1077 * This routine is called whenever multicast address list is updated.
1079 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, the mcnt local and its
 * increment, the mta alias declaration, the MAX-exceeded "continue"
 * path, and the else around the list-update call are missing from
 * this view.
 */
1082 ixgb_set_multi(struct adapter * adapter)
1084 u_int32_t reg_rctl = 0;
1086 struct ifmultiaddr *ifma;
1088 struct ifnet *ifp = adapter->ifp;
1090 IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1093 bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1094 MAX_NUM_MULTICAST_ADDRESSES);
/* Snapshot the interface multicast list under the maddr lock. */
1096 if_maddr_rlock(ifp);
1097 #if __FreeBSD_version < 500000
1098 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1100 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1102 if (ifma->ifma_addr->sa_family != AF_LINK)
1105 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1106 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1109 if_maddr_runlock(ifp);
/* Too many groups for the hardware filter: fall back to MPE. */
1111 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1112 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1113 reg_rctl |= IXGB_RCTL_MPE;
1114 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1116 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1122 /*********************************************************************
1125 * This routine checks for link status and updates statistics.
1127 **********************************************************************/
/*
 * Runs once per second via callout (armed in ixgb_init_locked).
 * NOTE(review): the "static void" line, braces, and the ifp local
 * declaration are missing from this view.
 */
1130 ixgb_local_timer(void *arg)
1133 struct adapter *adapter = arg;
1136 IXGB_LOCK_ASSERT(adapter);
1138 ixgb_check_for_link(&adapter->hw);
1139 ixgb_print_link_status(adapter);
1140 ixgb_update_stats_counters(adapter);
1141 if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1142 ixgb_print_hw_stats(adapter);
/* Software TX watchdog: fire when the countdown armed at encap hits 0. */
1144 if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1145 ixgb_watchdog(adapter);
1146 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
/*
 * Log link up/down transitions and track them in link_active so each
 * transition is reported only once.  NOTE(review): the "static void"
 * line, braces, the else joining the two branches, and the speed/
 * duplex arguments of the printf are missing from this view.
 */
1150 ixgb_print_link_status(struct adapter * adapter)
1152 if (adapter->hw.link_up) {
1153 if (!adapter->link_active) {
1154 if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1157 adapter->link_active = 1;
1160 if (adapter->link_active) {
1161 if_printf(adapter->ifp, "Link is Down \n");
1162 adapter->link_active = 0;
1171 /*********************************************************************
1173 * This routine disables all traffic on the adapter by issuing a
1174 * global reset on the MAC and deallocates TX/RX buffers.
1176 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, and the ifp local
 * declaration are missing from this view.
 */
1179 ixgb_stop(void *arg)
1182 struct adapter *adapter = arg;
1185 IXGB_LOCK_ASSERT(adapter);
1187 INIT_DEBUGOUT("ixgb_stop: begin\n");
1188 ixgb_disable_intr(adapter);
/* Clear the stopped flag so ixgb_adapter_stop() performs a full stop. */
1189 adapter->hw.adapter_stopped = FALSE;
1190 ixgb_adapter_stop(&adapter->hw);
1191 callout_stop(&adapter->timer);
1192 ixgb_free_transmit_structures(adapter);
1193 ixgb_free_receive_structures(adapter);
1195 /* Tell the stack that the interface is no longer active */
1196 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1197 adapter->tx_timer = 0;
1203 /*********************************************************************
1205 * Determine hardware revision.
1207 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, and the break
 * statements closing the switch cases are missing from this view.
 */
1209 ixgb_identify_hardware(struct adapter * adapter)
1211 device_t dev = adapter->dev;
1213 /* Make sure our PCI config space has the necessary stuff set */
1214 pci_enable_busmaster(dev);
1215 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1217 /* Save off the information about this board */
1218 adapter->hw.vendor_id = pci_get_vendor(dev);
1219 adapter->hw.device_id = pci_get_device(dev);
1220 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1221 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1222 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1224 /* Set MacType, etc. based on this PCI info */
1225 switch (adapter->hw.device_id) {
1226 case IXGB_DEVICE_ID_82597EX:
1227 case IXGB_DEVICE_ID_82597EX_SR:
1228 adapter->hw.mac_type = ixgb_82597;
/* default: unsupported device id — log and complain. */
1231 INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
1232 device_printf(dev, "unsupported device id 0x%x\n",
1233 adapter->hw.device_id);
/*
 * ixgb_allocate_pci_resources - map the device's memory BAR, allocate a
 * (shareable, active) IRQ and register ixgb_intr() as its handler.
 * NOTE(review): rid arguments and the error-return lines are elided in
 * this listing -- confirm against the complete source file.
 */
1240 ixgb_allocate_pci_resources(struct adapter * adapter)
1243 device_t dev = adapter->dev;
1246 adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
1249 if (!(adapter->res_memory)) {
1250 device_printf(dev, "Unable to allocate bus resource: memory\n");
/* Record the bus tag/handle so the register access macros can reach the BAR */
1253 adapter->osdep.mem_bus_space_tag =
1254 rman_get_bustag(adapter->res_memory);
1255 adapter->osdep.mem_bus_space_handle =
1256 rman_get_bushandle(adapter->res_memory);
1257 adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1260 adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
1262 RF_SHAREABLE | RF_ACTIVE);
1263 if (!(adapter->res_interrupt)) {
1265 "Unable to allocate bus resource: interrupt\n");
/* No filter routine; ixgb_intr runs as an MPSAFE ithread handler */
1268 if (bus_setup_intr(dev, adapter->res_interrupt,
1269 INTR_TYPE_NET | INTR_MPSAFE,
1270 NULL, (void (*) (void *))ixgb_intr, adapter,
1271 &adapter->int_handler_tag)) {
1272 device_printf(dev, "Error registering interrupt handler!\n");
/* Give the shared (hw-layer) code a pointer back to the OS-dep state */
1275 adapter->hw.back = &adapter->osdep;
/*
 * ixgb_free_pci_resources - undo ixgb_allocate_pci_resources(): tear
 * down the interrupt handler and release the IRQ, memory BAR and (if
 * present) I/O port resources.  Each release is guarded by a NULL check
 * so this is safe to call with partially-allocated state.
 */
1281 ixgb_free_pci_resources(struct adapter * adapter)
1283 device_t dev = adapter->dev;
1285 if (adapter->res_interrupt != NULL) {
1286 bus_teardown_intr(dev, adapter->res_interrupt,
1287 adapter->int_handler_tag);
1288 bus_release_resource(dev, SYS_RES_IRQ, 0,
1289 adapter->res_interrupt);
1291 if (adapter->res_memory != NULL) {
1292 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1293 adapter->res_memory);
1295 if (adapter->res_ioport != NULL) {
1296 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1297 adapter->res_ioport);
1302 /*********************************************************************
1304 * Initialize the hardware to a configuration as specified by the
1305 * adapter structure. The controller is reset, the EEPROM is
1306 * verified, the MAC address is set, then the shared initialization
1307 * routines are called.
1309 **********************************************************************/
/*
 * ixgb_hardware_init - globally reset the MAC, verify the EEPROM
 * checksum, then hand off to the shared ixgb_init_hw() routine.
 * NOTE(review): the error-return statements are elided in this listing.
 */
1311 ixgb_hardware_init(struct adapter * adapter)
1313 /* Issue a global reset */
/* Clear the "stopped" latch so ixgb_adapter_stop() really performs the reset */
1314 adapter->hw.adapter_stopped = FALSE;
1315 ixgb_adapter_stop(&adapter->hw);
1317 /* Make sure we have a good EEPROM before we read from it */
1318 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1319 device_printf(adapter->dev,
1320 "The EEPROM Checksum Is Not Valid\n");
1323 if (!ixgb_init_hw(&adapter->hw)) {
1324 device_printf(adapter->dev, "Hardware Initialization Failed");
1331 /*********************************************************************
1333 * Setup networking device structure and register an interface.
1335 **********************************************************************/
/*
 * ixgb_setup_interface - allocate and fill in the ifnet, attach it to
 * the ethernet layer, advertise capabilities (HW checksum, VLAN, long
 * frames, optional polling) and register the supported media types.
 * NOTE(review): several #else/#endif lines of the version-conditional
 * blocks are elided in this listing.
 */
1337 ixgb_setup_interface(device_t dev, struct adapter * adapter)
1340 INIT_DEBUGOUT("ixgb_setup_interface: begin");
1342 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1344 device_printf(dev, "can not allocate ifnet structure\n");
1347 #if __FreeBSD_version >= 502000
1348 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
/* Older releases name the interface via unit/name fields directly */
1350 ifp->if_unit = device_get_unit(dev);
1351 ifp->if_name = "ixgb";
/* 10Gb hardware, but if_baudrate is capped here at 1Gb/s (IF_Gbps limits) */
1353 ifp->if_baudrate = 1000000000;
1354 ifp->if_init = ixgb_init;
1355 ifp->if_softc = adapter;
1356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1357 ifp->if_ioctl = ixgb_ioctl;
1358 ifp->if_start = ixgb_start;
/* Bound the send queue by the TX descriptor ring size */
1359 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1361 #if __FreeBSD_version < 500000
1362 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1364 ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1367 ifp->if_capabilities = IFCAP_HWCSUM;
1370 * Tell the upper layer(s) we support long frames.
1372 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1374 #if __FreeBSD_version >= 500000
1375 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
/* Everything advertised is enabled by default */
1378 ifp->if_capenable = ifp->if_capabilities;
1380 #ifdef DEVICE_POLLING
1381 ifp->if_capabilities |= IFCAP_POLLING;
1385 * Specify the media types supported by this adapter and register
1386 * callbacks to update media and link information
1388 ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1390 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1392 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1394 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1395 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1400 /********************************************************************
1401 * Manage DMA'able memory.
1402 *******************************************************************/
/*
 * ixgb_dmamap_cb - bus_dmamap_load() callback: publish the first
 * segment's bus address through the opaque arg pointer (callers load
 * physically contiguous regions, so only segs[0] matters).
 */
1404 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1408 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgb_dma_malloc - allocate a DMA-safe region of `size' bytes for a
 * descriptor ring: create a tag, allocate backing memory, and load the
 * map to obtain the bus address (captured via ixgb_dmamap_cb).
 * On failure the partially-created tag is destroyed (labels elided in
 * this listing).  Results are stored in *dma.
 */
1413 ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1414 struct ixgb_dma_alloc * dma, int mapflags)
1420 r = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1421 PAGE_SIZE, 0, /* alignment, bounds */
1422 BUS_SPACE_MAXADDR, /* lowaddr */
1423 BUS_SPACE_MAXADDR, /* highaddr */
1424 NULL, NULL, /* filter, filterarg */
1427 size, /* maxsegsize */
1428 BUS_DMA_ALLOCNOW, /* flags */
1429 #if __FreeBSD_version >= 502000
1430 NULL, /* lockfunc */
1431 NULL, /* lockfuncarg */
1435 device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1439 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1440 BUS_DMA_NOWAIT, &dma->dma_map);
1442 device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1446 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1450 mapflags | BUS_DMA_NOWAIT);
1452 device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1456 dma->dma_size = size;
/* Error unwind: free the allocation, destroy the tag, mark it gone */
1459 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1461 bus_dma_tag_destroy(dma->dma_tag);
1463 dma->dma_tag = NULL;
/*
 * ixgb_dma_free - release a region obtained from ixgb_dma_malloc():
 * unload the map, free the memory, destroy the tag (reverse order of
 * creation).
 */
1470 ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1472 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1473 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1474 bus_dma_tag_destroy(dma->dma_tag);
1477 /*********************************************************************
1479 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1480 * the information needed to transmit a packet on the wire.
1482 **********************************************************************/
/*
 * ixgb_allocate_transmit_structures - allocate the tx_buffer array
 * (one entry per TX descriptor), zeroed.
 * NOTE(review): the explicit bzero is redundant -- the malloc already
 * passes M_ZERO; left as-is in this comment-only pass.
 */
1484 ixgb_allocate_transmit_structures(struct adapter * adapter)
1486 if (!(adapter->tx_buffer_area =
1487 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1488 adapter->num_tx_desc, M_DEVBUF,
1489 M_NOWAIT | M_ZERO))) {
1490 device_printf(adapter->dev,
1491 "Unable to allocate tx_buffer memory\n");
1494 bzero(adapter->tx_buffer_area,
1495 sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1500 /*********************************************************************
1502 * Allocate and initialize transmit structures.
1504 **********************************************************************/
/*
 * ixgb_setup_transmit_structures - create the TX mbuf DMA tag (up to
 * IXGB_MAX_SCATTER segments of MCLBYTES each), allocate the tx_buffer
 * array, zero the descriptor ring and reset the ring indices/state.
 */
1506 ixgb_setup_transmit_structures(struct adapter * adapter)
1509 * Setup DMA descriptor areas.
1511 if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1512 PAGE_SIZE, 0, /* alignment, bounds */
1513 BUS_SPACE_MAXADDR, /* lowaddr */
1514 BUS_SPACE_MAXADDR, /* highaddr */
1515 NULL, NULL, /* filter, filterarg */
1516 MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */
1517 IXGB_MAX_SCATTER, /* nsegments */
1518 MCLBYTES, /* maxsegsize */
1519 BUS_DMA_ALLOCNOW, /* flags */
1520 #if __FreeBSD_version >= 502000
1521 NULL, /* lockfunc */
1522 NULL, /* lockfuncarg */
1525 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1528 if (ixgb_allocate_transmit_structures(adapter))
/* Clear the whole descriptor ring before handing it to the hardware */
1531 bzero((void *)adapter->tx_desc_base,
1532 (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
/* Ring starts empty: producer and consumer both at slot 0 */
1534 adapter->next_avail_tx_desc = 0;
1535 adapter->oldest_used_tx_desc = 0;
1537 /* Set number of descriptors available */
1538 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1540 /* Set checksum context */
1541 adapter->active_checksum_context = OFFLOAD_NONE;
1546 /*********************************************************************
1548 * Enable transmit unit.
1550 **********************************************************************/
/*
 * ixgb_initialize_transmit_unit - program the hardware TX ring: base
 * address, length, head/tail pointers, interrupt delay, and enable the
 * transmitter via TCTL; also choose the default per-descriptor command
 * bits used by the transmit path.
 */
1552 ixgb_initialize_transmit_unit(struct adapter * adapter)
1555 u_int64_t tdba = adapter->txdma.dma_paddr;
1557 /* Setup the Base and Length of the Tx Descriptor Ring */
1558 IXGB_WRITE_REG(&adapter->hw, TDBAL,
1559 (tdba & 0x00000000ffffffffULL));
1560 IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1561 IXGB_WRITE_REG(&adapter->hw, TDLEN,
1562 adapter->num_tx_desc *
1563 sizeof(struct ixgb_tx_desc));
1565 /* Setup the HW Tx Head and Tail descriptor pointers */
1566 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1567 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1570 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1571 IXGB_READ_REG(&adapter->hw, TDBAL),
1572 IXGB_READ_REG(&adapter->hw, TDLEN));
/* TX interrupt delay value (in units the hardware defines) */
1574 IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1577 /* Program the Transmit Control Register */
/* NOTE(review): the value read here is immediately overwritten on the
 * next line (dead store) -- TCTL is set from scratch, not read-modify-
 * write.  Harmless, but the read could be dropped. */
1578 reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1579 reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1580 IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1582 /* Setup Transmit Descriptor Settings for this adapter */
1583 adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
/* Request delayed interrupts only when an interrupt delay is configured */
1585 if (adapter->tx_int_delay > 0)
1586 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1590 /*********************************************************************
1592 * Free all transmit related data structures.
1594 **********************************************************************/
/*
 * ixgb_free_transmit_structures - walk the tx_buffer array freeing any
 * in-flight mbufs and their DMA maps, then release the array itself and
 * destroy the TX DMA tag.  All steps are NULL-guarded so this is safe
 * after partial setup.
 */
1596 ixgb_free_transmit_structures(struct adapter * adapter)
1598 struct ixgb_buffer *tx_buffer;
1601 INIT_DEBUGOUT("free_transmit_structures: begin");
1603 if (adapter->tx_buffer_area != NULL) {
1604 tx_buffer = adapter->tx_buffer_area;
1605 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1606 if (tx_buffer->m_head != NULL) {
1607 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1608 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1609 m_freem(tx_buffer->m_head);
1611 tx_buffer->m_head = NULL;
1614 if (adapter->tx_buffer_area != NULL) {
1615 free(adapter->tx_buffer_area, M_DEVBUF);
1616 adapter->tx_buffer_area = NULL;
1618 if (adapter->txtag != NULL) {
1619 bus_dma_tag_destroy(adapter->txtag);
1620 adapter->txtag = NULL;
1625 /*********************************************************************
1627 * The offload context needs to be set when we transfer the first
1628 * packet of a particular protocol (TCP/UDP). We change the
1629 * context only if the protocol type changes.
1631 **********************************************************************/
/*
 * ixgb_transmit_checksum_setup - decide the TX checksum offload options
 * for `mp' and, when the TCP/UDP context changes, consume one ring slot
 * to write a new context descriptor programming the checksum start/
 * offset fields.  *txd_popts receives the per-packet option bits.
 * NOTE(review): early-return lines (same-context fast path, no-csum
 * case) are elided in this listing.
 */
1633 ixgb_transmit_checksum_setup(struct adapter * adapter,
1635 u_int8_t * txd_popts)
1637 struct ixgb_context_desc *TXD;
1638 struct ixgb_buffer *tx_buffer;
1641 if (mp->m_pkthdr.csum_flags) {
1643 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1644 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
/* Context already programmed for TCP/IP: nothing more to do */
1645 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1648 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1649 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1650 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1651 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1654 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1665 * If we reach this point, the checksum offload context needs to be
1668 curr_txd = adapter->next_avail_tx_desc;
1669 tx_buffer = &adapter->tx_buffer_area[curr_txd];
/* A context descriptor occupies a normal TX ring slot, reinterpreted */
1670 TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
/* Checksum computation starts just past the IP header */
1673 TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
/* Offset at which the hardware stores the computed checksum */
1678 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1680 ENET_HEADER_SIZE + sizeof(struct ip) +
1681 offsetof(struct tcphdr, th_sum);
1682 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1684 ENET_HEADER_SIZE + sizeof(struct ip) +
1685 offsetof(struct udphdr, uh_sum);
1687 TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
/* Context descriptors carry no mbuf */
1689 tx_buffer->m_head = NULL;
/* Wrap the ring index and account for the consumed slot */
1691 if (++curr_txd == adapter->num_tx_desc)
1694 adapter->num_tx_desc_avail--;
1695 adapter->next_avail_tx_desc = curr_txd;
1699 /**********************************************************************
1701 * Examine each tx_buffer in the used queue. If the hardware is done
1702 * processing the packet then free associated resources. The
1703 * tx_buffer is put back on the free queue.
1705 **********************************************************************/
/*
 * ixgb_clean_transmit_interrupts - reclaim TX descriptors the hardware
 * has finished with (DD bit set): unload/destroy maps, free mbufs, and
 * return the slots to the available pool.  Clears IFF_DRV_OACTIVE and
 * manages the watchdog when enough room has been recovered.
 * Caller must hold the adapter lock (asserted below).
 */
1707 ixgb_clean_transmit_interrupts(struct adapter * adapter)
1710 struct ixgb_buffer *tx_buffer;
1711 struct ixgb_tx_desc *tx_desc;
1713 IXGB_LOCK_ASSERT(adapter);
/* Ring already fully reclaimed: nothing to do */
1715 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1719 adapter->clean_tx_interrupts++;
1721 num_avail = adapter->num_tx_desc_avail;
1722 i = adapter->oldest_used_tx_desc;
1724 tx_buffer = &adapter->tx_buffer_area[i];
1725 tx_desc = &adapter->tx_desc_base[i];
/* Walk forward while the hardware reports Descriptor Done */
1727 while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1729 tx_desc->status = 0;
1732 if (tx_buffer->m_head) {
1733 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1734 BUS_DMASYNC_POSTWRITE);
1735 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1736 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1737 m_freem(tx_buffer->m_head);
1738 tx_buffer->m_head = NULL;
1740 if (++i == adapter->num_tx_desc)
1743 tx_buffer = &adapter->tx_buffer_area[i];
1744 tx_desc = &adapter->tx_desc_base[i];
1747 adapter->oldest_used_tx_desc = i;
1750 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1751 * it is OK to send packets. If there are no pending descriptors,
1752 * clear the timeout. Otherwise, if some descriptors have been freed,
1753 * restart the timeout.
1755 if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1756 struct ifnet *ifp = adapter->ifp;
1758 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1759 if (num_avail == adapter->num_tx_desc)
1760 adapter->tx_timer = 0;
1761 else if (num_avail == adapter->num_tx_desc_avail)
1762 adapter->tx_timer = IXGB_TX_TIMEOUT;
1764 adapter->num_tx_desc_avail = num_avail;
1769 /*********************************************************************
1771 * Get a buffer from system mbuf buffer pool.
1773 **********************************************************************/
/*
 * ixgb_get_buf - attach an mbuf cluster to RX ring slot `i'.  If `nmp'
 * is NULL a fresh cluster is allocated (failure bumps the alloc-failure
 * counter); otherwise the caller's mbuf is recycled.  The cluster is
 * DMA-loaded and its bus address written into the RX descriptor.
 * NOTE(review): the two identical m_len assignments sit in different
 * branches of an #if/#else that is elided in this listing -- confirm
 * against the complete source file.
 */
1775 ixgb_get_buf(int i, struct adapter * adapter,
1778 register struct mbuf *mp = nmp;
1779 struct ixgb_buffer *rx_buffer;
1788 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1791 adapter->mbuf_alloc_failed++;
1794 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* Recycled mbuf: reset length and data pointer to the full cluster */
1796 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1797 mp->m_data = mp->m_ext.ext_buf;
/* For standard MTU, shift the payload so the IP header lands aligned */
1801 if (ifp->if_mtu <= ETHERMTU) {
1802 m_adj(mp, ETHER_ALIGN);
1804 rx_buffer = &adapter->rx_buffer_area[i];
1807 * Using memory from the mbuf cluster pool, invoke the bus_dma
1808 * machinery to arrange the memory mapping.
1810 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1811 mtod(mp, void *), mp->m_len,
1812 ixgb_dmamap_cb, &paddr, 0);
1817 rx_buffer->m_head = mp;
/* Hand the cluster's bus address to the hardware (little-endian) */
1818 adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1819 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1824 /*********************************************************************
1826 * Allocate memory for rx_buffer structures. Since we use one
1827 * rx_buffer per received packet, the maximum number of rx_buffer's
1828 * that we'll need is equal to the number of receive descriptors
1829 * that we've allocated.
1831 **********************************************************************/
/*
 * ixgb_allocate_receive_structures - allocate the rx_buffer array, the
 * RX mbuf DMA tag and a map per descriptor, then pre-fill every slot
 * with a cluster via ixgb_get_buf().  On failure the tag and array are
 * torn down (error labels elided in this listing).
 * NOTE(review): as on the TX side, the bzero is redundant with M_ZERO.
 */
1833 ixgb_allocate_receive_structures(struct adapter * adapter)
1836 struct ixgb_buffer *rx_buffer;
1838 if (!(adapter->rx_buffer_area =
1839 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1840 adapter->num_rx_desc, M_DEVBUF,
1841 M_NOWAIT | M_ZERO))) {
1842 device_printf(adapter->dev,
1843 "Unable to allocate rx_buffer memory\n");
1846 bzero(adapter->rx_buffer_area,
1847 sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1849 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1850 PAGE_SIZE, 0, /* alignment, bounds */
1851 BUS_SPACE_MAXADDR, /* lowaddr */
1852 BUS_SPACE_MAXADDR, /* highaddr */
1853 NULL, NULL, /* filter, filterarg */
1854 MCLBYTES, /* maxsize */
1856 MCLBYTES, /* maxsegsize */
1857 BUS_DMA_ALLOCNOW, /* flags */
1858 #if __FreeBSD_version >= 502000
1859 NULL, /* lockfunc */
1860 NULL, /* lockfuncarg */
1864 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1865 "bus_dma_tag_create failed; error %u\n",
/* One DMA map per receive slot */
1869 rx_buffer = adapter->rx_buffer_area;
1870 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1871 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1874 device_printf(adapter->dev,
1875 "ixgb_allocate_receive_structures: "
1876 "bus_dmamap_create failed; error %u\n",
/* Populate every descriptor with an initial cluster */
1882 for (i = 0; i < adapter->num_rx_desc; i++) {
1883 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1884 adapter->rx_buffer_area[i].m_head = NULL;
1885 adapter->rx_desc_base[i].buff_addr = 0;
/* Error unwind: destroy the tag and release the buffer array */
1892 bus_dma_tag_destroy(adapter->rxtag);
1894 adapter->rxtag = NULL;
1895 free(adapter->rx_buffer_area, M_DEVBUF);
1896 adapter->rx_buffer_area = NULL;
1900 /*********************************************************************
1902 * Allocate and initialize receive structures.
1904 **********************************************************************/
/*
 * ixgb_setup_receive_structures - zero the RX descriptor ring, allocate
 * the receive buffers, and reset the software ring indices.
 */
1906 ixgb_setup_receive_structures(struct adapter * adapter)
1908 bzero((void *)adapter->rx_desc_base,
1909 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1911 if (ixgb_allocate_receive_structures(adapter))
1914 /* Setup our descriptor pointers */
1915 adapter->next_rx_desc_to_check = 0;
1916 adapter->next_rx_desc_to_use = 0;
1920 /*********************************************************************
1922 * Enable receive unit.
1924 **********************************************************************/
/*
 * ixgb_initialize_receive_unit - program the hardware RX path: disable
 * receives while configuring, set the interrupt delay, ring base/length
 * and head/tail pointers, descriptor thresholds, optional RAIDC
 * interrupt mitigation, RX checksum offload, buffer size, and finally
 * re-enable the receiver.
 * NOTE(review): `break' lines for the buffer-size switch and several
 * closing braces are elided in this listing.
 */
1926 ixgb_initialize_receive_unit(struct adapter * adapter)
1929 u_int32_t reg_rxcsum;
1930 u_int32_t reg_rxdctl;
1932 u_int64_t rdba = adapter->rxdma.dma_paddr;
1937 * Make sure receives are disabled while setting up the descriptor
1940 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1941 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1943 /* Set the Receive Delay Timer Register */
1944 IXGB_WRITE_REG(&adapter->hw, RDTR,
1945 adapter->rx_int_delay);
1948 /* Setup the Base and Length of the Rx Descriptor Ring */
1949 IXGB_WRITE_REG(&adapter->hw, RDBAL,
1950 (rdba & 0x00000000ffffffffULL));
1951 IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1952 IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1953 sizeof(struct ixgb_rx_desc));
1955 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1956 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
/* Tail one behind head: the whole ring is available to the hardware */
1958 IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
/* Prefetch/host/write-back thresholds for the RX descriptor engine */
1962 reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1963 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1964 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1965 IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
/* Optional receive-interrupt mitigation (RAIDC) */
1969 if (adapter->raidc) {
1971 uint8_t poll_threshold;
1972 #define IXGB_RAIDC_POLL_DEFAULT 120
/* Derive a 6-bit poll threshold from the ring size (1/16th of it) */
1974 poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1975 poll_threshold >>= 1;
1976 poll_threshold &= 0x3F;
1977 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1978 (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1979 (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1981 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1983 /* Enable Receive Checksum Offload for TCP and UDP ? */
1984 if (ifp->if_capenable & IFCAP_RXCSUM) {
1985 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1986 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1987 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1989 /* Setup the Receive Control Register */
1990 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1991 reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1992 reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1994 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
/* Translate the configured buffer length into the RCTL BSIZE encoding */
1996 switch (adapter->rx_buffer_len) {
1998 case IXGB_RXBUFFER_2048:
1999 reg_rctl |= IXGB_RCTL_BSIZE_2048;
2001 case IXGB_RXBUFFER_4096:
2002 reg_rctl |= IXGB_RCTL_BSIZE_4096;
2004 case IXGB_RXBUFFER_8192:
2005 reg_rctl |= IXGB_RCTL_BSIZE_8192;
2007 case IXGB_RXBUFFER_16384:
2008 reg_rctl |= IXGB_RCTL_BSIZE_16384;
2012 reg_rctl |= IXGB_RCTL_RXEN;
2015 /* Enable Receives */
2016 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2021 /*********************************************************************
2023 * Free receive related data structures.
2025 **********************************************************************/
/*
 * ixgb_free_receive_structures - mirror of the TX teardown for the RX
 * side: unload/destroy each slot's DMA map, free any attached mbuf,
 * then release the buffer array and destroy the RX DMA tag.  All steps
 * are NULL-guarded, so partial setup is handled.
 */
2027 ixgb_free_receive_structures(struct adapter * adapter)
2029 struct ixgb_buffer *rx_buffer;
2032 INIT_DEBUGOUT("free_receive_structures: begin");
2034 if (adapter->rx_buffer_area != NULL) {
2035 rx_buffer = adapter->rx_buffer_area;
2036 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2037 if (rx_buffer->map != NULL) {
2038 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2039 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2041 if (rx_buffer->m_head != NULL)
2042 m_freem(rx_buffer->m_head);
2043 rx_buffer->m_head = NULL;
2046 if (adapter->rx_buffer_area != NULL) {
2047 free(adapter->rx_buffer_area, M_DEVBUF);
2048 adapter->rx_buffer_area = NULL;
2050 if (adapter->rxtag != NULL) {
2051 bus_dma_tag_destroy(adapter->rxtag);
2052 adapter->rxtag = NULL;
2057 /*********************************************************************
2059 * This routine executes in interrupt context. It replenishes
2060 * the mbufs in the descriptor and sends data which has been
2061 * dma'ed into host memory to upper layer.
2063 * We loop at most count times if count is > 0, or until done if
2066 *********************************************************************/
/*
 * ixgb_process_receive_interrupts - RX cleanup loop.  Walks the ring
 * from next_rx_desc_to_check while the DD bit is set (at most `count'
 * descriptors when count > 0), chains multi-descriptor frames through
 * fmp/lmp, hands complete error-free frames to the stack (dropping the
 * adapter lock around if_input), then replenishes clusters and advances
 * the hardware tail pointer -- honoring the 82597EX write-back errata
 * by never resubmitting the most recently written-back descriptor.
 * Caller must hold the adapter lock (asserted below).
 * NOTE(review): many structural lines (braces, else branches, #endif,
 * continue/break statements) are elided in this listing.
 */
2068 ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2072 #if __FreeBSD_version < 500000
2073 struct ether_header *eh;
2077 u_int8_t accept_frame = 0;
2079 int next_to_use = 0;
2082 /* Pointer to the receive descriptor being examined. */
2083 struct ixgb_rx_desc *current_desc;
2085 IXGB_LOCK_ASSERT(adapter);
2088 i = adapter->next_rx_desc_to_check;
2089 next_to_use = adapter->next_rx_desc_to_use;
/* Remember where this pass started, for the errata handling below */
2090 eop_desc = adapter->next_rx_desc_to_check;
2091 current_desc = &adapter->rx_desc_base[i];
2093 if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2095 adapter->no_pkts_avail++;
2099 while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2101 mp = adapter->rx_buffer_area[i].m_head;
2102 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2103 BUS_DMASYNC_POSTREAD);
2105 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2111 len = current_desc->length;
/* Any hardware-reported error discards the whole frame */
2113 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2114 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2115 IXGB_RX_DESC_ERRORS_RXE)) {
2120 /* Assign correct length to the current fragment */
2123 if (adapter->fmp == NULL) {
2124 mp->m_pkthdr.len = len;
2125 adapter->fmp = mp; /* Store the first mbuf */
2128 /* Chain mbuf's together */
/* Only the first mbuf of a chain keeps the packet header */
2129 mp->m_flags &= ~M_PKTHDR;
2130 adapter->lmp->m_next = mp;
2131 adapter->lmp = adapter->lmp->m_next;
2132 adapter->fmp->m_pkthdr.len += len;
2137 adapter->fmp->m_pkthdr.rcvif = ifp;
2139 #if __FreeBSD_version < 500000
2140 eh = mtod(adapter->fmp, struct ether_header *);
2142 /* Remove ethernet header from mbuf */
2143 m_adj(adapter->fmp, sizeof(struct ether_header));
2144 ixgb_receive_checksum(adapter, current_desc,
2147 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2148 VLAN_INPUT_TAG(eh, adapter->fmp,
2149 current_desc->special);
2151 ether_input(ifp, eh, adapter->fmp);
2153 ixgb_receive_checksum(adapter, current_desc,
2155 #if __FreeBSD_version < 700000
2156 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2157 VLAN_INPUT_TAG(ifp, adapter->fmp,
2158 current_desc->special);
/* 7.x+: tag the mbuf directly instead of VLAN_INPUT_TAG */
2160 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2161 adapter->fmp->m_pkthdr.ether_vtag =
2162 current_desc->special;
2163 adapter->fmp->m_flags |= M_VLANTAG;
2167 if (adapter->fmp != NULL) {
/* Drop the adapter lock across the stack's input path */
2168 IXGB_UNLOCK(adapter);
2169 (*ifp->if_input) (ifp, adapter->fmp);
2174 adapter->fmp = NULL;
2175 adapter->lmp = NULL;
2177 adapter->rx_buffer_area[i].m_head = NULL;
/* Error path: discard any partially-assembled chain */
2179 adapter->dropped_pkts++;
2180 if (adapter->fmp != NULL)
2181 m_freem(adapter->fmp);
2182 adapter->fmp = NULL;
2183 adapter->lmp = NULL;
2186 /* Zero out the receive descriptors status */
2187 current_desc->status = 0;
2189 /* Advance our pointers to the next descriptor */
2190 if (++i == adapter->num_rx_desc) {
2192 current_desc = adapter->rx_desc_base;
2196 adapter->next_rx_desc_to_check = i;
2199 i = (adapter->num_rx_desc - 1);
2202 * 82597EX: Workaround for redundant write back in receive descriptor ring (causes
2203 * memory corruption). Avoid using and re-submitting the most recently received RX
2204 * descriptor back to hardware.
2206 * if(Last written back descriptor == EOP bit set descriptor)
2207 * then avoid re-submitting the most recently received RX descriptor
2209 * if(Last written back descriptor != EOP bit set descriptor)
2210 * then avoid re-submitting the most recently received RX descriptors
2211 * till last EOP bit set descriptor.
2213 if (eop_desc != i) {
2214 if (++eop_desc == adapter->num_rx_desc)
2218 /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
2219 while (next_to_use != i) {
2220 current_desc = &adapter->rx_desc_base[next_to_use];
2221 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2222 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2223 IXGB_RX_DESC_ERRORS_RXE))) {
/* Errored slot still owns its mbuf: recycle it in place */
2224 mp = adapter->rx_buffer_area[next_to_use].m_head;
2225 ixgb_get_buf(next_to_use, adapter, mp);
2227 if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2230 /* Advance our pointers to the next descriptor */
2231 if (++next_to_use == adapter->num_rx_desc) {
2233 current_desc = adapter->rx_desc_base;
2237 adapter->next_rx_desc_to_use = next_to_use;
/* Tail is written one behind the next slot to use (wrapping at zero) */
2238 if (--next_to_use < 0)
2239 next_to_use = (adapter->num_rx_desc - 1);
2240 /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
2241 IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2246 /*********************************************************************
2248 * Verify that the hardware indicated that the checksum is valid.
2249 * Inform the stack about the status of checksum so that stack
2250 * doesn't spend time verifying the checksum.
2252 *********************************************************************/
/*
 * ixgb_receive_checksum - translate the hardware's RX checksum status
 * bits into mbuf csum_flags so the stack can skip software checksum
 * verification.  If the hardware ignored the checksum (IXSM) no flags
 * are set at all.
 */
2254 ixgb_receive_checksum(struct adapter * adapter,
2255 struct ixgb_rx_desc * rx_desc,
2258 if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2259 mp->m_pkthdr.csum_flags = 0;
2262 if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2264 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2265 /* IP Checksum Good */
2266 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2267 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* IP checksum was checked but failed: clear all flags */
2270 mp->m_pkthdr.csum_flags = 0;
2273 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2275 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
/* TCP/UDP checksum verified; report a valid pseudo-header sum */
2276 mp->m_pkthdr.csum_flags |=
2277 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2278 mp->m_pkthdr.csum_data = htons(0xffff);
/*
 * ixgb_enable_vlans - set the VLAN Mode Enable bit in CTRL0 so the
 * hardware recognizes/strips 802.1Q tags (read-modify-write).
 */
2286 ixgb_enable_vlans(struct adapter * adapter)
2290 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2291 ctrl |= IXGB_CTRL0_VME;
2292 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
/*
 * ixgb_enable_intr - unmask the interrupt causes this driver handles:
 * RX timer, TX done write-back, RX minimum-threshold, link change and
 * RX overrun.
 */
2299 ixgb_enable_intr(struct adapter * adapter)
2301 IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2302 IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
/*
 * ixgb_disable_intr - mask every interrupt source by writing all ones
 * to the Interrupt Mask Clear register.
 */
2307 ixgb_disable_intr(struct adapter * adapter)
2309 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
/*
 * ixgb_write_pci_cfg - shared-code hook: write a PCI config-space
 * register, reaching the device_t through the OS-dependent back pointer.
 */
2314 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2318 pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2322 /**********************************************************************
2324 * Update the board statistics counters.
2326 **********************************************************************/
/*
 * ixgb_update_stats_counters - fold the hardware's (clear-on-read)
 * statistics registers into the driver's accumulators, then refresh the
 * ifnet counters the OS reports.  The *L/*H pairs are the low/high
 * halves of 64-bit hardware counters.
 */
2328 ixgb_update_stats_counters(struct adapter * adapter)
/* Receive-side counters */
2332 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2333 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2334 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2335 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2336 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2337 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2338 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2339 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2340 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2341 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2343 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2344 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2345 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
/* Flow-control (XON/XOFF) counters */
2346 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2347 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2348 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2349 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
/* Transmit-side counters */
2350 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2351 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2352 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2353 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2354 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2355 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2356 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2357 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2358 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2359 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2360 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2361 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2362 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2363 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2364 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2365 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2366 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2367 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2368 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2369 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2371 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2372 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2373 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2374 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2375 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2376 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2377 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2378 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2379 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2380 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2381 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2382 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2383 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2384 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2385 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2386 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2387 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2388 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2389 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2390 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2391 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2392 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2396 /* Fill out the OS statistics structure */
2397 ifp->if_ipackets = adapter->stats.gprcl;
2398 ifp->if_opackets = adapter->stats.gptcl;
2399 ifp->if_ibytes = adapter->stats.gorcl;
2400 ifp->if_obytes = adapter->stats.gotcl;
2401 ifp->if_imcasts = adapter->stats.mprcl;
/* Full-duplex fiber link: collisions cannot occur */
2402 ifp->if_collisions = 0;
/* Input errors: software drops plus CRC, no-buffer, missed-packet and
 * length-error counts */
2406 adapter->dropped_pkts +
2407 adapter->stats.crcerrs +
2408 adapter->stats.rnbc +
2409 adapter->stats.mpc +
2410 adapter->stats.rlec;
2416 /**********************************************************************
2418 * This routine is called only when ixgb_display_debug_stats is enabled.
2419 * This routine provides a way to take a look at important statistics
2420 * maintained by the driver and hardware.
2422 **********************************************************************/
2424 ixgb_print_hw_stats(struct adapter * adapter)
/*
 * Dump driver-maintained and hardware-maintained statistics to the
 * console via device_printf().  Per the header comment above, this is
 * only called when ixgb_display_debug_stats is enabled.
 *
 * NOTE(review): this extract appears to be missing several original
 * lines -- the "static" return-type line, the braces, the declaration
 * of the local "dev" (presumably adapter->dev), and the sprintf()
 * calls that fill buf_speed/buf_type before they are printed.  Verify
 * against the full if_ixgb.c before editing.
 */
2426 	char buf_speed[100], buf_type[100];
2427 	ixgb_bus_speed bus_speed;
2428 	ixgb_bus_type bus_type;
/* Driver-side soft counters (long-sized members of struct adapter). */
2433 	device_printf(dev, "Packets not Avail = %ld\n",
2434 	    adapter->no_pkts_avail);
2435 	device_printf(dev, "CleanTxInterrupts = %ld\n",
2436 	    adapter->clean_tx_interrupts);
/*
 * Saved interrupt-cause counters; printed through a (long long) cast
 * so the %lld format matches the argument width on all platforms.
 */
2437 	device_printf(dev, "ICR RXDMT0 = %lld\n",
2438 	    (long long)adapter->sv_stats.icr_rxdmt0);
2439 	device_printf(dev, "ICR RXO = %lld\n",
2440 	    (long long)adapter->sv_stats.icr_rxo);
2441 	device_printf(dev, "ICR RXT0 = %lld\n",
2442 	    (long long)adapter->sv_stats.icr_rxt0);
2443 	device_printf(dev, "ICR TXDW = %lld\n",
2444 	    (long long)adapter->sv_stats.icr_TXDW);
/*
 * Decode PCI bus speed/type enums into human-readable strings.
 * NOTE(review): the conditional chains below are arguments to
 * sprintf(buf_speed, ...) / sprintf(buf_type, ...) calls whose lines
 * are not present in this extract -- confirm in the original file.
 */
2447 	bus_speed = adapter->hw.bus.speed;
2448 	bus_type = adapter->hw.bus.type;
2450 		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2451 		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2452 		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2453 		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2455 	device_printf(dev, "PCI_Bus_Speed = %s\n",
2459 		bus_type == ixgb_bus_type_pci ? "PCI" :
2460 		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2462 	device_printf(dev, "PCI_Bus_Type = %s\n",
/* Transmit-path resource exhaustion and mbuf allocation failures. */
2465 	device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2466 	    adapter->no_tx_desc_avail1);
2467 	device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2468 	    adapter->no_tx_desc_avail2);
2469 	device_printf(dev, "Std Mbuf Failed = %ld\n",
2470 	    adapter->mbuf_alloc_failed);
2471 	device_printf(dev, "Std Cluster Failed = %ld\n",
2472 	    adapter->mbuf_cluster_failed);
/* Hardware error/drop counters accumulated in adapter->stats. */
2474 	device_printf(dev, "Defer count = %lld\n",
2475 	    (long long)adapter->stats.dc);
2476 	device_printf(dev, "Missed Packets = %lld\n",
2477 	    (long long)adapter->stats.mpc);
2478 	device_printf(dev, "Receive No Buffers = %lld\n",
2479 	    (long long)adapter->stats.rnbc);
2480 	device_printf(dev, "Receive length errors = %lld\n",
2481 	    (long long)adapter->stats.rlec);
2482 	device_printf(dev, "Crc errors = %lld\n",
2483 	    (long long)adapter->stats.crcerrs);
2484 	device_printf(dev, "Driver dropped packets = %ld\n",
2485 	    adapter->dropped_pkts);
/* 802.3x flow-control pause frames received and transmitted. */
2487 	device_printf(dev, "XON Rcvd = %lld\n",
2488 	    (long long)adapter->stats.xonrxc);
2489 	device_printf(dev, "XON Xmtd = %lld\n",
2490 	    (long long)adapter->stats.xontxc);
2491 	device_printf(dev, "XOFF Rcvd = %lld\n",
2492 	    (long long)adapter->stats.xoffrxc);
2493 	device_printf(dev, "XOFF Xmtd = %lld\n",
2494 	    (long long)adapter->stats.xofftxc);
/* Good (error-free) and jumbo packet totals, rx and tx. */
2496 	device_printf(dev, "Good Packets Rcvd = %lld\n",
2497 	    (long long)adapter->stats.gprcl);
2498 	device_printf(dev, "Good Packets Xmtd = %lld\n",
2499 	    (long long)adapter->stats.gptcl);
2501 	device_printf(dev, "Jumbo frames recvd = %lld\n",
2502 	    (long long)adapter->stats.jprcl);
2503 	device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2504 	    (long long)adapter->stats.jptcl);
2511 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2515 struct adapter *adapter;
2518 error = sysctl_handle_int(oidp, &result, 0, req);
2520 if (error || !req->newptr)
2524 adapter = (struct adapter *) arg1;
2525 ixgb_print_hw_stats(adapter);