1 /*******************************************************************************
3 Copyright (c) 2001-2004, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
40 #include <dev/ixgb/if_ixgb.h>
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
45 int ixgb_display_debug_stats = 0;
/* When nonzero, the local timer path calls ixgb_print_hw_stats() (see
 * ixgb_local_timer below).  Left writable so it can be toggled at runtime. */
47 /*********************************************************************
48 * Linked list of board private structures for all NICs found
49 *********************************************************************/
51 struct adapter *ixgb_adapter_list = NULL;
/* Head of a doubly linked list of softc structures; attach prepends,
 * detach unlinks (see ixgb_attach/ixgb_detach). */
55 /*********************************************************************
57 *********************************************************************/
/* Driver version / copyright strings; version is folded into the probe
 * description string in ixgb_probe(). */
59 char ixgb_driver_version[] = "1.0.6";
60 char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
62 /*********************************************************************
65 * Used by probe to select devices to load on
66 * Last field stores an index into ixgb_strings
67 * Last entry must be all 0s
69 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70 *********************************************************************/
72 static ixgb_vendor_info_t ixgb_vendor_info_array[] =
74 /* Intel(R) PRO/10000 Network Connection */
75 {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76 {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77 /* required last entry */
/* NOTE(review): the all-zeros terminator entry and closing brace are elided
 * in this extract; ixgb_probe's while loop relies on vendor_id == 0 to stop. */
81 /*********************************************************************
82 * Table of branding strings for all supported NICs.
83 *********************************************************************/
/* Indexed by the last (String Index) field of ixgb_vendor_info_array. */
85 static char *ixgb_strings[] = {
86 "Intel(R) PRO/10GbE Network Driver"
89 /*********************************************************************
91 *********************************************************************/
/* Local (file-scope) function prototypes.  NOTE(review): several prototypes
 * in this extract are missing their leading "static <type>" line (elided
 * original lines), e.g. ixgb_receive_checksum, ixgb_transmit_checksum_setup,
 * ixgb_get_buf and ixgb_dma_malloc below. */
92 static int ixgb_probe(device_t);
93 static int ixgb_attach(device_t);
94 static int ixgb_detach(device_t);
95 static int ixgb_shutdown(device_t);
96 static void ixgb_intr(void *);
97 static void ixgb_start(struct ifnet *);
98 static void ixgb_start_locked(struct ifnet *);
99 static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static void ixgb_watchdog(struct adapter *);
101 static void ixgb_init(void *);
102 static void ixgb_init_locked(struct adapter *);
103 static void ixgb_stop(void *);
104 static void ixgb_media_status(struct ifnet *, struct ifmediareq *);
105 static int ixgb_media_change(struct ifnet *);
106 static void ixgb_identify_hardware(struct adapter *);
107 static int ixgb_allocate_pci_resources(struct adapter *);
108 static void ixgb_free_pci_resources(struct adapter *);
109 static void ixgb_local_timer(void *);
110 static int ixgb_hardware_init(struct adapter *);
111 static int ixgb_setup_interface(device_t, struct adapter *);
112 static int ixgb_setup_transmit_structures(struct adapter *);
113 static void ixgb_initialize_transmit_unit(struct adapter *);
114 static int ixgb_setup_receive_structures(struct adapter *);
115 static void ixgb_initialize_receive_unit(struct adapter *);
116 static void ixgb_enable_intr(struct adapter *);
117 static void ixgb_disable_intr(struct adapter *);
118 static void ixgb_free_transmit_structures(struct adapter *);
119 static void ixgb_free_receive_structures(struct adapter *);
120 static void ixgb_update_stats_counters(struct adapter *);
121 static void ixgb_clean_transmit_interrupts(struct adapter *);
122 static int ixgb_allocate_receive_structures(struct adapter *);
123 static int ixgb_allocate_transmit_structures(struct adapter *);
124 static int ixgb_process_receive_interrupts(struct adapter *, int);
126 ixgb_receive_checksum(struct adapter *,
127 struct ixgb_rx_desc * rx_desc,
130 ixgb_transmit_checksum_setup(struct adapter *,
133 static void ixgb_set_promisc(struct adapter *);
134 static void ixgb_disable_promisc(struct adapter *);
135 static void ixgb_set_multi(struct adapter *);
136 static void ixgb_print_hw_stats(struct adapter *);
137 static void ixgb_print_link_status(struct adapter *);
139 ixgb_get_buf(int i, struct adapter *,
141 static void ixgb_enable_vlans(struct adapter * adapter);
142 static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143 static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
145 ixgb_dma_malloc(struct adapter *, bus_size_t,
146 struct ixgb_dma_alloc *, int);
147 static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148 #ifdef DEVICE_POLLING
149 static poll_handler_t ixgb_poll;
152 /*********************************************************************
153 * FreeBSD Device Interface Entry Points
154 *********************************************************************/
/* newbus method table: maps generic device_* operations onto this driver. */
156 static device_method_t ixgb_methods[] = {
157 /* Device interface */
158 DEVMETHOD(device_probe, ixgb_probe),
159 DEVMETHOD(device_attach, ixgb_attach),
160 DEVMETHOD(device_detach, ixgb_detach),
161 DEVMETHOD(device_shutdown, ixgb_shutdown),
/* NOTE(review): the table terminator and closing brace are elided here. */
166 static driver_t ixgb_driver = {
167 "ixgb", ixgb_methods, sizeof(struct adapter),
/* Register the driver on the pci bus and declare module dependencies. */
170 static devclass_t ixgb_devclass;
171 DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
173 MODULE_DEPEND(ixgb, pci, 1, 1, 1);
174 MODULE_DEPEND(ixgb, ether, 1, 1, 1);
176 /* some defines for controlling descriptor fetches in h/w */
177 #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefetch below this */
178 #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
179 * pushed this many descriptors from
/* NOTE(review): the line closing the comment above is elided in this extract. */
181 #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */
184 /*********************************************************************
185 * Device identification routine
187 * ixgb_probe determines if the driver should be loaded on
188 * adapter based on PCI vendor/device id of the adapter.
190 * return 0 on success, positive on failure
191 *********************************************************************/
/* NOTE(review): interior lines (return type, early-return for foreign vendor,
 * loop tail, final ENXIO return) are elided in this extract. */
194 ixgb_probe(device_t dev)
196 ixgb_vendor_info_t *ent;
198 u_int16_t pci_vendor_id = 0;
199 u_int16_t pci_device_id = 0;
200 u_int16_t pci_subvendor_id = 0;
201 u_int16_t pci_subdevice_id = 0;
202 char adapter_name[60];
204 INIT_DEBUGOUT("ixgb_probe: begin");
206 pci_vendor_id = pci_get_vendor(dev);
207 if (pci_vendor_id != IXGB_VENDOR_ID)
210 pci_device_id = pci_get_device(dev);
211 pci_subvendor_id = pci_get_subvendor(dev);
212 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the ID table; sub IDs of PCI_ANY_ID act as wildcards. */
214 ent = ixgb_vendor_info_array;
215 while (ent->vendor_id != 0) {
216 if ((pci_vendor_id == ent->vendor_id) &&
217 (pci_device_id == ent->device_id) &&
219 ((pci_subvendor_id == ent->subvendor_id) ||
220 (ent->subvendor_id == PCI_ANY_ID)) &&
222 ((pci_subdevice_id == ent->subdevice_id) ||
223 (ent->subdevice_id == PCI_ANY_ID))) {
/* Build "brand, Version - x.y.z" and hand it to the bus as the device description. */
224 sprintf(adapter_name, "%s, Version - %s",
225 ixgb_strings[ent->index],
226 ixgb_driver_version);
227 device_set_desc_copy(dev, adapter_name);
228 return (BUS_PROBE_DEFAULT);
236 /*********************************************************************
237 * Device initialization routine
239 * The attach entry point is called when the driver is being loaded.
240 * This routine identifies the type of hardware, allocates all resources
241 * and initializes the hardware.
243 * return 0 on success, positive on failure
244 *********************************************************************/
/* NOTE(review): this extract elides the return type, local declarations
 * (tsize/rsize), goto-error jumps and the error labels themselves; the
 * unwind sequence at the bottom belongs to those elided labels. */
247 ixgb_attach(device_t dev)
249 struct adapter *adapter;
253 device_printf(dev, "%s\n", ixgb_copyright);
254 INIT_DEBUGOUT("ixgb_attach: begin");
256 /* Allocate, clear, and link in our adapter structure */
257 if (!(adapter = device_get_softc(dev))) {
258 device_printf(dev, "adapter structure allocation failed\n");
261 bzero(adapter, sizeof(struct adapter));
263 adapter->osdep.dev = dev;
264 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Prepend this softc onto the global ixgb_adapter_list. */
266 if (ixgb_adapter_list != NULL)
267 ixgb_adapter_list->prev = adapter;
268 adapter->next = ixgb_adapter_list;
269 ixgb_adapter_list = adapter;
/* Per-device "stats" sysctl node backed by ixgb_sysctl_stats. */
272 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
273 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
274 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
276 ixgb_sysctl_stats, "I", "Statistics");
278 callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
280 /* Determine hardware revision */
281 ixgb_identify_hardware(adapter);
283 /* Parameters (to be read from user) */
284 adapter->num_tx_desc = IXGB_MAX_TXD;
285 adapter->num_rx_desc = IXGB_MAX_RXD;
286 adapter->tx_int_delay = TIDV;
287 adapter->rx_int_delay = RDTR;
288 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
/* Flow-control defaults (watermarks, pause time, XON behavior). */
290 adapter->hw.fc.high_water = FCRTH;
291 adapter->hw.fc.low_water = FCRTL;
292 adapter->hw.fc.pause_time = FCPAUSE;
293 adapter->hw.fc.send_xon = TRUE;
294 adapter->hw.fc.type = FLOW_CONTROL;
297 /* Set the max frame size assuming standard ethernet sized frames */
298 adapter->hw.max_frame_size =
299 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
301 if (ixgb_allocate_pci_resources(adapter)) {
302 device_printf(dev, "Allocation of PCI resources failed\n");
/* Descriptor rings are rounded up to a 4 KB multiple for DMA allocation. */
306 tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
307 sizeof(struct ixgb_tx_desc), 4096);
309 /* Allocate Transmit Descriptor ring */
310 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
311 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
315 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
317 rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
318 sizeof(struct ixgb_rx_desc), 4096);
320 /* Allocate Receive Descriptor ring */
321 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
322 device_printf(dev, "Unable to allocate rx_desc memory\n");
326 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
328 /* Allocate multicast array memory. */
329 adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
330 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
331 if (adapter->mta == NULL) {
332 device_printf(dev, "Can not allocate multicast setup array\n");
337 /* Initialize the hardware */
338 if (ixgb_hardware_init(adapter)) {
339 device_printf(dev, "Unable to initialize the hardware\n");
343 /* Setup OS specific network interface */
344 if (ixgb_setup_interface(dev, adapter) != 0)
347 /* Initialize statistics */
348 ixgb_clear_hw_cntrs(&adapter->hw);
349 ixgb_update_stats_counters(adapter);
351 INIT_DEBUGOUT("ixgb_attach: end");
/* Error unwind (labels elided in this extract): release resources in
 * reverse order of acquisition. */
355 ixgb_dma_free(adapter, &adapter->rxdma);
357 ixgb_dma_free(adapter, &adapter->txdma);
360 if (adapter->ifp != NULL)
361 if_free(adapter->ifp);
362 ixgb_free_pci_resources(adapter);
363 sysctl_ctx_free(&adapter->sysctl_ctx);
364 free(adapter->mta, M_DEVBUF);
369 /*********************************************************************
370 * Device removal routine
372 * The detach entry point is called when the driver is being removed.
373 * This routine stops the adapter and deallocates all the resources
374 * that were allocated for driver operation.
376 * return 0 on success, positive on failure
377 *********************************************************************/
/* NOTE(review): elided here: return type, the IXGB_LOCK/ixgb_stop pairing
 * before IXGB_UNLOCK, #else/#endif halves of the version conditionals,
 * closing braces of the if-blocks, and the final return. */
380 ixgb_detach(device_t dev)
382 struct adapter *adapter = device_get_softc(dev);
383 struct ifnet *ifp = adapter->ifp;
385 INIT_DEBUGOUT("ixgb_detach: begin");
387 #ifdef DEVICE_POLLING
388 if (ifp->if_capenable & IFCAP_POLLING)
389 ether_poll_deregister(ifp);
/* Flag detach-in-progress so ixgb_ioctl refuses further configuration. */
393 adapter->in_detach = 1;
396 IXGB_UNLOCK(adapter);
398 #if __FreeBSD_version < 500000
399 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
/* callout_drain waits for any in-flight ixgb_local_timer to finish. */
403 callout_drain(&adapter->timer);
404 ixgb_free_pci_resources(adapter);
405 #if __FreeBSD_version >= 500000
409 /* Free Transmit Descriptor ring */
410 if (adapter->tx_desc_base) {
411 ixgb_dma_free(adapter, &adapter->txdma);
412 adapter->tx_desc_base = NULL;
414 /* Free Receive Descriptor ring */
415 if (adapter->rx_desc_base) {
416 ixgb_dma_free(adapter, &adapter->rxdma);
417 adapter->rx_desc_base = NULL;
419 /* Remove from the adapter list */
420 if (ixgb_adapter_list == adapter)
421 ixgb_adapter_list = adapter->next;
422 if (adapter->next != NULL)
423 adapter->next->prev = adapter->prev;
424 if (adapter->prev != NULL)
425 adapter->prev->next = adapter->next;
426 free(adapter->mta, M_DEVBUF);
428 IXGB_LOCK_DESTROY(adapter);
432 /*********************************************************************
434 * Shutdown entry point
436 **********************************************************************/
/* NOTE(review): the matching IXGB_LOCK/ixgb_stop call and the return are
 * elided in this extract; only the tail of the function is visible. */
439 ixgb_shutdown(device_t dev)
441 struct adapter *adapter = device_get_softc(dev);
444 IXGB_UNLOCK(adapter);
449 /*********************************************************************
450 * Transmit entry point
452 * ixgb_start is called by the stack to initiate a transmit.
453 * The driver will remain in this routine as long as there are
454 * packets to transmit and transmit resources are available.
455 * In case resources are not available stack is notified and
456 * the packet is requeued.
457 **********************************************************************/
/* NOTE(review): elided here: return type, m_head declaration, early return
 * when link is down, NULL-mbuf check after IF_DEQUEUE, the break after
 * requeueing, #else/#endif of the BPF conditional, and the loop/function
 * closing braces.  Caller must hold the adapter lock (asserted below). */
460 ixgb_start_locked(struct ifnet * ifp)
463 struct adapter *adapter = ifp->if_softc;
465 IXGB_LOCK_ASSERT(adapter);
467 if (!adapter->link_active)
/* Drain the send queue, handing each frame to ixgb_encap. */
470 while (ifp->if_snd.ifq_head != NULL) {
471 IF_DEQUEUE(&ifp->if_snd, m_head);
/* Out of descriptors: mark OACTIVE and put the frame back at the head. */
476 if (ixgb_encap(adapter, m_head)) {
477 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
478 IF_PREPEND(&ifp->if_snd, m_head);
481 /* Send a copy of the frame to the BPF listener */
482 #if __FreeBSD_version < 500000
484 bpf_mtap(ifp, m_head);
486 ETHER_BPF_MTAP(ifp, m_head);
488 /* Set timeout in case hardware has problems transmitting */
489 adapter->tx_timer = IXGB_TX_TIMEOUT;
/* Unlocked wrapper around ixgb_start_locked().  NOTE(review): the return
 * type, the IXGB_LOCK call and closing brace are elided in this extract. */
496 ixgb_start(struct ifnet *ifp)
498 struct adapter *adapter = ifp->if_softc;
501 ixgb_start_locked(ifp);
502 IXGB_UNLOCK(adapter);
506 /*********************************************************************
509 * ixgb_ioctl is called when the user wants to configure the
512 * return 0 on success, positive on failure
513 **********************************************************************/
/* NOTE(review): heavily elided: the return type, the error/mask locals,
 * the `switch (command)` header, every `case SIOCxxx:` label, `break`s,
 * lock acquisitions paired with the visible IXGB_UNLOCKs, and the final
 * return.  The visible fragments below are the per-case bodies in order:
 * SIOCxIFADDR, SIOCSIFMTU, SIOCSIFFLAGS, SIOC(ADD|DEL)MULTI,
 * SIOCxIFMEDIA, SIOCSIFCAP, default. */
516 ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
519 struct ifreq *ifr = (struct ifreq *) data;
520 struct adapter *adapter = ifp->if_softc;
/* Refuse configuration once detach has started. */
522 if (adapter->in_detach)
528 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
529 ether_ioctl(ifp, command, data);
532 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
533 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
537 ifp->if_mtu = ifr->ifr_mtu;
538 adapter->hw.max_frame_size =
539 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
541 ixgb_init_locked(adapter);
542 IXGB_UNLOCK(adapter);
546 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
548 if (ifp->if_flags & IFF_UP) {
549 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
550 ixgb_init_locked(adapter);
/* Re-sync promiscuous/allmulti state with the new flags. */
552 ixgb_disable_promisc(adapter);
553 ixgb_set_promisc(adapter);
555 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
559 IXGB_UNLOCK(adapter);
563 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
564 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Reprogram the multicast filter with interrupts masked. */
566 ixgb_disable_intr(adapter);
567 ixgb_set_multi(adapter);
568 ixgb_enable_intr(adapter);
569 IXGB_UNLOCK(adapter);
574 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
575 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
578 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
579 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
580 #ifdef DEVICE_POLLING
581 if (mask & IFCAP_POLLING) {
582 if (ifr->ifr_reqcap & IFCAP_POLLING) {
583 error = ether_poll_register(ixgb_poll, ifp);
587 ixgb_disable_intr(adapter);
588 ifp->if_capenable |= IFCAP_POLLING;
589 IXGB_UNLOCK(adapter);
591 error = ether_poll_deregister(ifp);
592 /* Enable interrupt even in error case */
594 ixgb_enable_intr(adapter);
595 ifp->if_capenable &= ~IFCAP_POLLING;
596 IXGB_UNLOCK(adapter);
599 #endif /* DEVICE_POLLING */
/* Toggle hardware checksum offload capability. */
600 if (mask & IFCAP_HWCSUM) {
601 if (IFCAP_HWCSUM & ifp->if_capenable)
602 ifp->if_capenable &= ~IFCAP_HWCSUM;
604 ifp->if_capenable |= IFCAP_HWCSUM;
605 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
610 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
618 /*********************************************************************
619 * Watchdog entry point
621 * This routine is called whenever hardware quits transmitting.
623 **********************************************************************/
/* NOTE(review): elided here: return type, ifp local, the opening `/*` of
 * the pause-frame comment, the early return inside the TXOFF branch, the
 * oerrors bump, and the closing brace.  Invoked from ixgb_local_timer when
 * tx_timer expires. */
626 ixgb_watchdog(struct adapter *adapter)
633 * If we are in this routine because of pause frames, then don't
634 * reset the hardware.
636 if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
637 adapter->tx_timer = IXGB_TX_TIMEOUT;
640 if_printf(ifp, "watchdog timeout -- resetting\n");
643 ixgb_init_locked(adapter);
651 /*********************************************************************
654 * This routine is used in two ways. It is used by the stack as
655 * init entry point in network interface structure. It is also used
656 * by the driver as a hw/sw initialization routine to get to a
659 * return 0 on success, positive on failure
660 **********************************************************************/
/* NOTE(review): elided here: return type, ifp/temp_reg locals, the
 * ixgb_stop call that normally precedes re-init, error returns inside the
 * failure branches, and closing braces.  Caller must hold the adapter
 * lock (asserted below). */
663 ixgb_init_locked(struct adapter *adapter)
667 INIT_DEBUGOUT("ixgb_init: begin");
669 IXGB_LOCK_ASSERT(adapter);
674 /* Get the latest mac address, User can use a LAA */
675 bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
676 IXGB_ETH_LENGTH_OF_ADDRESS);
678 /* Initialize the hardware */
679 if (ixgb_hardware_init(adapter)) {
680 if_printf(ifp, "Unable to initialize the hardware\n");
683 ixgb_enable_vlans(adapter);
685 /* Prepare transmit descriptors and buffers */
686 if (ixgb_setup_transmit_structures(adapter)) {
687 if_printf(ifp, "Could not setup transmit structures\n");
691 ixgb_initialize_transmit_unit(adapter);
693 /* Setup Multicast table */
694 ixgb_set_multi(adapter);
696 /* Prepare receive descriptors and buffers */
697 if (ixgb_setup_receive_structures(adapter)) {
698 if_printf(ifp, "Could not setup receive structures\n");
702 ixgb_initialize_receive_unit(adapter);
704 /* Don't lose promiscuous settings */
705 ixgb_set_promisc(adapter);
/* Mark the interface up and ready for transmit. */
708 ifp->if_drv_flags |= IFF_DRV_RUNNING;
709 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Advertise checksum offload to the stack only when TXCSUM is enabled. */
712 if (ifp->if_capenable & IFCAP_TXCSUM)
713 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
715 ifp->if_hwassist = 0;
718 /* Enable jumbo frames */
719 if (ifp->if_mtu > ETHERMTU) {
721 IXGB_WRITE_REG(&adapter->hw, MFS,
722 adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
723 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
724 temp_reg |= IXGB_CTRL0_JFE;
725 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
727 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
728 ixgb_clear_hw_cntrs(&adapter->hw);
729 #ifdef DEVICE_POLLING
731 * Only disable interrupts if we are polling, make sure they are on
734 if (ifp->if_capenable & IFCAP_POLLING)
735 ixgb_disable_intr(adapter);
738 ixgb_enable_intr(adapter);
/* Body fragment of ixgb_init(void *arg): unlocked wrapper that takes the
 * adapter lock and calls ixgb_init_locked().  NOTE(review): the function
 * signature, IXGB_LOCK call and closing brace are elided in this extract. */
746 struct adapter *adapter = arg;
749 ixgb_init_locked(adapter);
750 IXGB_UNLOCK(adapter);
754 #ifdef DEVICE_POLLING
/* ixgb_poll_locked: DEVICE_POLLING worker; processes up to `count` RX
 * packets and cleans the TX ring.  NOTE(review): return type, locals
 * (reg_icr, rx_npkts), closing braces and the return of rx_npkts are
 * elided in this extract.  Caller must hold the adapter lock. */
756 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
758 struct adapter *adapter = ifp->if_softc;
762 IXGB_LOCK_ASSERT(adapter);
764 if (cmd == POLL_AND_CHECK_STATUS) {
765 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
766 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
767 ixgb_check_for_link(&adapter->hw);
768 ixgb_print_link_status(adapter);
771 rx_npkts = ixgb_process_receive_interrupts(adapter, count);
772 ixgb_clean_transmit_interrupts(adapter);
/* Kick the transmitter if frames queued up while we were polling. */
774 if (ifp->if_snd.ifq_head != NULL)
775 ixgb_start_locked(ifp);
/* ixgb_poll: registered poll handler; takes the lock and delegates to
 * ixgb_poll_locked when the interface is running. */
780 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
782 struct adapter *adapter = ifp->if_softc;
786 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
787 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
788 IXGB_UNLOCK(adapter);
791 #endif /* DEVICE_POLLING */
793 /*********************************************************************
795 * Interrupt Service routine
797 **********************************************************************/
/* NOTE(review): elided here: the function signature line (ixgb_intr per
 * the prototypes), ifp/reg_icr declarations, the lock acquisition, the
 * early return on a zero ICR, the loop decrement, and closing braces. */
802 u_int32_t loop_cnt = IXGB_MAX_INTR;
805 struct adapter *adapter = arg;
806 boolean_t rxdmt0 = FALSE;
812 #ifdef DEVICE_POLLING
/* If polling is active the ISR does nothing; polling owns the device. */
813 if (ifp->if_capenable & IFCAP_POLLING) {
814 IXGB_UNLOCK(adapter);
/* Reading ICR acknowledges (clears) the pending interrupt causes. */
819 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
821 IXGB_UNLOCK(adapter);
825 if (reg_icr & IXGB_INT_RXDMT0)
/* Per-cause bookkeeping counters (debug statistics). */
829 if (reg_icr & IXGB_INT_RXDMT0)
830 adapter->sv_stats.icr_rxdmt0++;
831 if (reg_icr & IXGB_INT_RXO)
832 adapter->sv_stats.icr_rxo++;
833 if (reg_icr & IXGB_INT_RXT0)
834 adapter->sv_stats.icr_rxt0++;
835 if (reg_icr & IXGB_INT_TXDW)
836 adapter->sv_stats.icr_TXDW++;
839 /* Link status change */
840 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
841 ixgb_check_for_link(&adapter->hw);
842 ixgb_print_link_status(adapter);
/* Bounded service loop: process RX/TX work up to IXGB_MAX_INTR passes. */
844 while (loop_cnt > 0) {
845 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
846 ixgb_process_receive_interrupts(adapter, -1);
847 ixgb_clean_transmit_interrupts(adapter);
/* Re-arm the RX descriptor-minimum-threshold interrupt if it fired. */
852 if (rxdmt0 && adapter->raidc) {
853 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
854 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
856 if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
857 ixgb_start_locked(ifp);
859 IXGB_UNLOCK(adapter);
864 /*********************************************************************
866 * Media Ioctl callback
868 * This routine is called whenever the user queries the status of
869 * the interface using ifconfig.
871 **********************************************************************/
/* NOTE(review): return type, the early return when link is down, and the
 * closing brace are elided in this extract. */
873 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
875 struct adapter *adapter = ifp->if_softc;
877 INIT_DEBUGOUT("ixgb_media_status: begin");
879 ixgb_check_for_link(&adapter->hw);
880 ixgb_print_link_status(adapter);
882 ifmr->ifm_status = IFM_AVALID;
883 ifmr->ifm_active = IFM_ETHER;
885 if (!adapter->hw.link_up)
/* Link is up: report active 1000_SX full-duplex media. */
888 ifmr->ifm_status |= IFM_ACTIVE;
889 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
894 /*********************************************************************
896 * Media Ioctl callback
898 * This routine is called when the user changes speed/duplex using
899 * media/mediopt option with ifconfig.
901 **********************************************************************/
/* NOTE(review): return type, the EINVAL return for non-Ethernet media,
 * the success return and closing brace are elided in this extract. */
903 ixgb_media_change(struct ifnet * ifp)
905 struct adapter *adapter = ifp->if_softc;
906 struct ifmedia *ifm = &adapter->media;
908 INIT_DEBUGOUT("ixgb_media_change: begin");
910 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
916 /*********************************************************************
918 * This routine maps the mbufs to tx descriptors.
920 * return 0 on success, positive on failure
921 **********************************************************************/
/* NOTE(review): heavily elided: return type, locals (map, txd_popts, mtag),
 * several error returns, #else/#endif halves of the version conditionals,
 * loop/branch closing braces, and the final return 0. */
924 ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
927 int i, j, error, nsegs;
929 #if __FreeBSD_version < 500000
930 struct ifvlan *ifv = NULL;
932 bus_dma_segment_t segs[IXGB_MAX_SCATTER];
934 struct ixgb_buffer *tx_buffer = NULL;
935 struct ixgb_tx_desc *current_tx_desc = NULL;
936 struct ifnet *ifp = adapter->ifp;
939 * Force a cleanup if number of TX descriptors available hits the
942 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
943 ixgb_clean_transmit_interrupts(adapter);
/* Still below the threshold after cleanup: report no descriptors. */
945 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
946 adapter->no_tx_desc_avail1++;
950 * Map the packet for DMA.
952 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
953 adapter->no_tx_map_avail++;
956 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
957 &nsegs, BUS_DMA_NOWAIT);
959 adapter->no_tx_dma_setup++;
960 if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
961 "error %u\n", error);
962 bus_dmamap_destroy(adapter->txtag, map);
965 KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
/* Not enough free descriptors for all DMA segments: undo the map. */
967 if (nsegs > adapter->num_tx_desc_avail) {
968 adapter->no_tx_desc_avail2++;
969 bus_dmamap_destroy(adapter->txtag, map);
972 if (ifp->if_hwassist > 0) {
973 ixgb_transmit_checksum_setup(adapter, m_head,
978 /* Find out if we are in vlan mode */
979 #if __FreeBSD_version < 500000
980 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
981 m_head->m_pkthdr.rcvif != NULL &&
982 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
983 ifv = m_head->m_pkthdr.rcvif->if_softc;
984 #elseif __FreeBSD_version < 700000
985 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
/* Fill one TX descriptor per DMA segment, wrapping at ring end. */
987 i = adapter->next_avail_tx_desc;
988 for (j = 0; j < nsegs; j++) {
989 tx_buffer = &adapter->tx_buffer_area[i];
990 current_tx_desc = &adapter->tx_desc_base[i];
992 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
993 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
994 current_tx_desc->popts = txd_popts;
995 if (++i == adapter->num_tx_desc)
998 tx_buffer->m_head = NULL;
1001 adapter->num_tx_desc_avail -= nsegs;
1002 adapter->next_avail_tx_desc = i;
1004 #if __FreeBSD_version < 500000
1006 /* Set the vlan id */
1007 current_tx_desc->vlan = ifv->ifv_tag;
1008 #elseif __FreeBSD_version < 700000
1010 /* Set the vlan id */
1011 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1013 if (m_head->m_flags & M_VLANTAG) {
1014 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1017 /* Tell hardware to add tag */
1018 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
/* The last descriptor's buffer entry keeps the mbuf and DMA map so the
 * TX-cleanup path can unload and free them after transmit completes. */
1020 tx_buffer->m_head = m_head;
1021 tx_buffer->map = map;
1022 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1025 * Last Descriptor of Packet needs End Of Packet (EOP)
1027 current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1030 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1031 * that this frame is available to transmit.
1033 IXGB_WRITE_REG(&adapter->hw, TDT, i);
/* Program RCTL promiscuous bits from the interface flags: IFF_PROMISC
 * enables unicast+multicast promiscuous; IFF_ALLMULTI enables multicast
 * promiscuous only.  NOTE(review): return type, reg_rctl declaration and
 * closing braces are elided in this extract. */
1039 ixgb_set_promisc(struct adapter * adapter)
1043 struct ifnet *ifp = adapter->ifp;
1045 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1047 if (ifp->if_flags & IFF_PROMISC) {
1048 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1049 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1050 } else if (ifp->if_flags & IFF_ALLMULTI) {
1051 reg_rctl |= IXGB_RCTL_MPE;
1052 reg_rctl &= ~IXGB_RCTL_UPE;
1053 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Clear both promiscuous bits (unicast and multicast) in RCTL.
 * NOTE(review): return type, reg_rctl declaration and closing brace are
 * elided in this extract. */
1059 ixgb_disable_promisc(struct adapter * adapter)
1063 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1065 reg_rctl &= (~IXGB_RCTL_UPE);
1066 reg_rctl &= (~IXGB_RCTL_MPE);
1067 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1073 /*********************************************************************
1076 * This routine is called whenever multicast address list is updated.
1078 **********************************************************************/
/* NOTE(review): elided here: return type, mcnt/mta locals (mta presumably
 * aliases adapter->mta — confirm against full source), the `continue` for
 * non-AF_LINK entries, the mcnt increment/overflow guard inside the loop,
 * the #else/#endif of the version conditional, and closing braces. */
1081 ixgb_set_multi(struct adapter * adapter)
1083 u_int32_t reg_rctl = 0;
1085 struct ifmultiaddr *ifma;
1087 struct ifnet *ifp = adapter->ifp;
1089 IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1092 bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1093 MAX_NUM_MULTICAST_ADDRESSES);
/* Snapshot the interface's multicast list into the flat mta array. */
1095 if_maddr_rlock(ifp);
1096 #if __FreeBSD_version < 500000
1097 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1099 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1101 if (ifma->ifma_addr->sa_family != AF_LINK)
1104 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1105 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1108 if_maddr_runlock(ifp);
/* Too many groups for the hardware filter: fall back to multicast
 * promiscuous; otherwise program the exact list into the hardware. */
1110 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1111 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1112 reg_rctl |= IXGB_RCTL_MPE;
1113 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1115 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1121 /*********************************************************************
1124 * This routine checks for link status and updates statistics.
1126 **********************************************************************/
/* Once-per-second callout (re-armed at the bottom).  Also drives the TX
 * watchdog via adapter->tx_timer.  NOTE(review): return type, ifp local
 * and closing braces are elided in this extract. */
1129 ixgb_local_timer(void *arg)
1132 struct adapter *adapter = arg;
1135 IXGB_LOCK_ASSERT(adapter);
1137 ixgb_check_for_link(&adapter->hw);
1138 ixgb_print_link_status(adapter);
1139 ixgb_update_stats_counters(adapter);
1140 if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1141 ixgb_print_hw_stats(adapter);
/* Software TX watchdog: when the countdown hits zero, reset the chip. */
1143 if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1144 ixgb_watchdog(adapter);
1145 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
/* Log link up/down transitions exactly once per transition, tracked via
 * adapter->link_active.  NOTE(review): return type, the if_printf speed
 * arguments and closing braces are elided in this extract. */
1149 ixgb_print_link_status(struct adapter * adapter)
1151 if (adapter->hw.link_up) {
1152 if (!adapter->link_active) {
1153 if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1156 adapter->link_active = 1;
1159 if (adapter->link_active) {
1160 if_printf(adapter->ifp, "Link is Down \n");
1161 adapter->link_active = 0;
1170 /*********************************************************************
1172 * This routine disables all traffic on the adapter by issuing a
1173 * global reset on the MAC and deallocates TX/RX buffers.
1175 **********************************************************************/
/* NOTE(review): return type, ifp local and closing brace are elided in
 * this extract.  Caller must hold the adapter lock (asserted below). */
1178 ixgb_stop(void *arg)
1181 struct adapter *adapter = arg;
1184 IXGB_LOCK_ASSERT(adapter);
1186 INIT_DEBUGOUT("ixgb_stop: begin\n");
1187 ixgb_disable_intr(adapter);
/* Clearing adapter_stopped first forces ixgb_adapter_stop to do the reset. */
1188 adapter->hw.adapter_stopped = FALSE;
1189 ixgb_adapter_stop(&adapter->hw);
1190 callout_stop(&adapter->timer);
1191 ixgb_free_transmit_structures(adapter);
1192 ixgb_free_receive_structures(adapter);
1194 /* Tell the stack that the interface is no longer active */
1195 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1196 adapter->tx_timer = 0;
1202 /*********************************************************************
1204 * Determine hardware revision.
1206 **********************************************************************/
/* Cache PCI config identification into adapter->hw and pick the MAC type.
 * NOTE(review): return type, the `break` after setting mac_type, the
 * `default:` label above the unknown-device messages, and the closing
 * brace are elided in this extract. */
1208 ixgb_identify_hardware(struct adapter * adapter)
1210 device_t dev = adapter->dev;
1212 /* Make sure our PCI config space has the necessary stuff set */
1213 pci_enable_busmaster(dev);
1214 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1216 /* Save off the information about this board */
1217 adapter->hw.vendor_id = pci_get_vendor(dev);
1218 adapter->hw.device_id = pci_get_device(dev);
1219 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1220 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1221 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1223 /* Set MacType, etc. based on this PCI info */
1224 switch (adapter->hw.device_id) {
1225 case IXGB_DEVICE_ID_82597EX:
1226 case IXGB_DEVICE_ID_82597EX_SR:
1227 adapter->hw.mac_type = ixgb_82597;
1230 INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
1231 device_printf(dev, "unsupported device id 0x%x\n",
1232 adapter->hw.device_id);
/*
 * ixgb_allocate_pci_resources: map the device's memory BAR and IRQ and
 * install the interrupt handler.  On success the shared code's back
 * pointer is set to the osdep structure so register access works.
 * Each failure path prints a diagnostic via device_printf().
 */
1239 ixgb_allocate_pci_resources(struct adapter * adapter)
1242 device_t dev = adapter->dev;
/* Map the memory-space BAR used for register access */
1245 adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
1248 if (!(adapter->res_memory)) {
1249 device_printf(dev, "Unable to allocate bus resource: memory\n");
1252 adapter->osdep.mem_bus_space_tag =
1253 rman_get_bustag(adapter->res_memory);
1254 adapter->osdep.mem_bus_space_handle =
1255 rman_get_bushandle(adapter->res_memory);
/*
 * hw_addr is pointed at the bus-space handle itself; the register
 * macros presumably dereference through it -- TODO confirm against
 * IXGB_WRITE_REG/IXGB_READ_REG in the header.
 */
1256 adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
/* Allocate a shareable, active IRQ and hook up the handler */
1259 adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
1261 RF_SHAREABLE | RF_ACTIVE);
1262 if (!(adapter->res_interrupt)) {
1264 "Unable to allocate bus resource: interrupt\n");
1267 if (bus_setup_intr(dev, adapter->res_interrupt,
1268 INTR_TYPE_NET | INTR_MPSAFE,
1269 NULL, (void (*) (void *))ixgb_intr, adapter,
1270 &adapter->int_handler_tag)) {
1271 device_printf(dev, "Error registering interrupt handler!\n");
1274 adapter->hw.back = &adapter->osdep;
/*
 * ixgb_free_pci_resources: release everything acquired by
 * ixgb_allocate_pci_resources -- interrupt handler and IRQ, the memory
 * BAR, and (if present) the I/O port resource.  Safe to call with any
 * subset of the resources unallocated (each is NULL-checked).
 */
1280 ixgb_free_pci_resources(struct adapter * adapter)
1282 device_t dev = adapter->dev;
1284 if (adapter->res_interrupt != NULL) {
1285 bus_teardown_intr(dev, adapter->res_interrupt,
1286 adapter->int_handler_tag);
1287 bus_release_resource(dev, SYS_RES_IRQ, 0,
1288 adapter->res_interrupt);
1290 if (adapter->res_memory != NULL) {
1291 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1292 adapter->res_memory);
1294 if (adapter->res_ioport != NULL) {
1295 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1296 adapter->res_ioport);
1301 /*********************************************************************
1303 * Initialize the hardware to a configuration as specified by the
1304 * adapter structure. The controller is reset, the EEPROM is
1305 * verified, the MAC address is set, then the shared initialization
1306 * routines are called.
1308 **********************************************************************/
/*
 * ixgb_hardware_init: reset the MAC, validate the EEPROM checksum and
 * run the shared-code initialization (ixgb_init_hw).  Errors are
 * reported via device_printf(); the (hidden) return paths are not
 * visible in this excerpt.
 */
1310 ixgb_hardware_init(struct adapter * adapter)
1312 /* Issue a global reset */
/* Clear adapter_stopped so ixgb_adapter_stop() actually performs the reset */
1313 adapter->hw.adapter_stopped = FALSE;
1314 ixgb_adapter_stop(&adapter->hw);
1316 /* Make sure we have a good EEPROM before we read from it */
1317 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1318 device_printf(adapter->dev,
1319 "The EEPROM Checksum Is Not Valid\n");
1322 if (!ixgb_init_hw(&adapter->hw)) {
1323 device_printf(adapter->dev, "Hardware Initialization Failed");
1330 /*********************************************************************
1332 * Setup networking device structure and register an interface.
1334 **********************************************************************/
/*
 * ixgb_setup_interface: allocate and populate the ifnet structure,
 * attach it to the ethernet layer, advertise capabilities (HW csum,
 * VLAN tagging/MTU, optional polling) and register the ifmedia
 * callbacks.  Version-conditional code keeps compatibility across
 * several FreeBSD releases.
 */
1336 ixgb_setup_interface(device_t dev, struct adapter * adapter)
1339 INIT_DEBUGOUT("ixgb_setup_interface: begin");
1341 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1343 device_printf(dev, "can not allocate ifnet structure\n");
1346 #if __FreeBSD_version >= 502000
1347 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
/* Older releases: set unit/name fields directly */
1349 ifp->if_unit = device_get_unit(dev);
1350 ifp->if_name = "ixgb";
/* 10 Gb device, but if_baudrate is set to 1 Gb/s here -- TODO confirm intent */
1352 ifp->if_baudrate = 1000000000;
1353 ifp->if_init = ixgb_init;
1354 ifp->if_softc = adapter;
1355 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1356 ifp->if_ioctl = ixgb_ioctl;
1357 ifp->if_start = ixgb_start;
1358 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1360 #if __FreeBSD_version < 500000
1361 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1363 ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1366 ifp->if_capabilities = IFCAP_HWCSUM;
1369 * Tell the upper layer(s) we support long frames.
1371 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1373 #if __FreeBSD_version >= 500000
1374 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1377 ifp->if_capenable = ifp->if_capabilities;
1379 #ifdef DEVICE_POLLING
1380 ifp->if_capabilities |= IFCAP_POLLING;
1384 * Specify the media types supported by this adapter and register
1385 * callbacks to update media and link information
1387 ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1389 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1391 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1393 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1394 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1399 /********************************************************************
1400 * Manage DMA'able memory.
1401 *******************************************************************/
/*
 * ixgb_dmamap_cb: bus_dmamap_load() callback.  Stores the physical
 * (bus) address of the first DMA segment into the bus_addr_t that
 * 'arg' points to.  Only single-segment mappings are expected here.
 */
1403 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1407 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgb_dma_malloc: allocate a page-aligned, DMA-able memory region of
 * 'size' bytes.  Creates a dedicated DMA tag, allocates the memory,
 * and loads the map (the physical address lands in dma->dma_paddr via
 * ixgb_dmamap_cb).  On failure each step is unwound: the goto-cleanup
 * labels free the memory and destroy the tag (labels themselves are
 * outside this sampled excerpt).
 */
1412 ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1413 struct ixgb_dma_alloc * dma, int mapflags)
1419 r = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1420 PAGE_SIZE, 0, /* alignment, bounds */
1421 BUS_SPACE_MAXADDR, /* lowaddr */
1422 BUS_SPACE_MAXADDR, /* highaddr */
1423 NULL, NULL, /* filter, filterarg */
1426 size, /* maxsegsize */
1427 BUS_DMA_ALLOCNOW, /* flags */
1428 #if __FreeBSD_version >= 502000
1429 NULL, /* lockfunc */
1430 NULL, /* lockfuncarg */
1434 device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1438 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1439 BUS_DMA_NOWAIT, &dma->dma_map);
1441 device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1445 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1449 mapflags | BUS_DMA_NOWAIT);
1451 device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1455 dma->dma_size = size;
/* Error unwind: free DMA memory, destroy the tag, clear the pointer */
1458 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1460 bus_dma_tag_destroy(dma->dma_tag);
1462 dma->dma_tag = NULL;
/*
 * ixgb_dma_free: release a region obtained from ixgb_dma_malloc --
 * unload the map, free the DMA memory, destroy the tag.
 */
1469 ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1471 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1472 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1473 bus_dma_tag_destroy(dma->dma_tag);
1476 /*********************************************************************
1478 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1479 * the information needed to transmit a packet on the wire.
1481 **********************************************************************/
/*
 * ixgb_allocate_transmit_structures: allocate the tx_buffer array
 * (one entry per TX descriptor).  Prints a diagnostic and fails if
 * the allocation does not succeed.
 */
1483 ixgb_allocate_transmit_structures(struct adapter * adapter)
1485 if (!(adapter->tx_buffer_area =
1486 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1487 adapter->num_tx_desc, M_DEVBUF,
1488 M_NOWAIT | M_ZERO))) {
1489 device_printf(adapter->dev,
1490 "Unable to allocate tx_buffer memory\n");
/* NOTE(review): redundant -- M_ZERO above already zeroed the area */
1493 bzero(adapter->tx_buffer_area,
1494 sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1499 /*********************************************************************
1501 * Allocate and initialize transmit structures.
1503 **********************************************************************/
/*
 * ixgb_setup_transmit_structures: create the TX DMA tag (up to
 * IXGB_MAX_SCATTER segments of MCLBYTES each), allocate the tx_buffer
 * array, zero the descriptor ring, and reset the ring bookkeeping
 * (next/oldest indices, available count, checksum context).
 */
1505 ixgb_setup_transmit_structures(struct adapter * adapter)
1508 * Setup DMA descriptor areas.
1510 if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1511 PAGE_SIZE, 0, /* alignment, bounds */
1512 BUS_SPACE_MAXADDR, /* lowaddr */
1513 BUS_SPACE_MAXADDR, /* highaddr */
1514 NULL, NULL, /* filter, filterarg */
1515 MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */
1516 IXGB_MAX_SCATTER, /* nsegments */
1517 MCLBYTES, /* maxsegsize */
1518 BUS_DMA_ALLOCNOW, /* flags */
1519 #if __FreeBSD_version >= 502000
1520 NULL, /* lockfunc */
1521 NULL, /* lockfuncarg */
1524 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1527 if (ixgb_allocate_transmit_structures(adapter))
1530 bzero((void *)adapter->tx_desc_base,
1531 (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
1533 adapter->next_avail_tx_desc = 0;
1534 adapter->oldest_used_tx_desc = 0;
1536 /* Set number of descriptors available */
1537 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1539 /* Set checksum context */
1540 adapter->active_checksum_context = OFFLOAD_NONE;
1545 /*********************************************************************
1547 * Enable transmit unit.
1549 **********************************************************************/
/*
 * ixgb_initialize_transmit_unit: program the hardware TX registers --
 * descriptor ring base/length, head/tail pointers, interrupt delay,
 * and the transmit control register -- then set the default descriptor
 * command bits used by the transmit path.
 */
1551 ixgb_initialize_transmit_unit(struct adapter * adapter)
1554 u_int64_t tdba = adapter->txdma.dma_paddr;
1556 /* Setup the Base and Length of the Tx Descriptor Ring */
1557 IXGB_WRITE_REG(&adapter->hw, TDBAL,
1558 (tdba & 0x00000000ffffffffULL));
1559 IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1560 IXGB_WRITE_REG(&adapter->hw, TDLEN,
1561 adapter->num_tx_desc *
1562 sizeof(struct ixgb_tx_desc));
1564 /* Setup the HW Tx Head and Tail descriptor pointers */
1565 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1566 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1569 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1570 IXGB_READ_REG(&adapter->hw, TDBAL),
1571 IXGB_READ_REG(&adapter->hw, TDLEN));
1573 IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1576 /* Program the Transmit Control Register */
/* NOTE(review): the value read here is immediately overwritten below */
1577 reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1578 reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1579 IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1581 /* Setup Transmit Descriptor Settings for this adapter */
1582 adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
/* Request a delayed TX interrupt when an interrupt delay is configured */
1584 if (adapter->tx_int_delay > 0)
1585 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1589 /*********************************************************************
1591 * Free all transmit related data structures.
1593 **********************************************************************/
/*
 * ixgb_free_transmit_structures: walk the tx_buffer array, unloading
 * and destroying each DMA map and freeing any pending mbuf, then free
 * the array itself and destroy the TX DMA tag.  All steps are
 * NULL-guarded so this is safe after a partial setup.
 */
1595 ixgb_free_transmit_structures(struct adapter * adapter)
1597 struct ixgb_buffer *tx_buffer;
1600 INIT_DEBUGOUT("free_transmit_structures: begin");
1602 if (adapter->tx_buffer_area != NULL) {
1603 tx_buffer = adapter->tx_buffer_area;
1604 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1605 if (tx_buffer->m_head != NULL) {
1606 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1607 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1608 m_freem(tx_buffer->m_head);
1610 tx_buffer->m_head = NULL;
1613 if (adapter->tx_buffer_area != NULL) {
1614 free(adapter->tx_buffer_area, M_DEVBUF);
1615 adapter->tx_buffer_area = NULL;
1617 if (adapter->txtag != NULL) {
1618 bus_dma_tag_destroy(adapter->txtag);
1619 adapter->txtag = NULL;
1624 /*********************************************************************
1626 * The offload context needs to be set when we transfer the first
1627 * packet of a particular protocol (TCP/UDP). We change the
1628 * context only if the protocol type changes.
1630 **********************************************************************/
/*
 * ixgb_transmit_checksum_setup: decide whether a new checksum-offload
 * context descriptor is needed for this packet.  The context only
 * changes when the protocol (TCP vs UDP) differs from the currently
 * active one; otherwise the early-return paths (hidden in this
 * excerpt) reuse the existing context.  When a new context is needed,
 * a context descriptor is written into the TX ring, consuming one
 * descriptor slot.
 */
1632 ixgb_transmit_checksum_setup(struct adapter * adapter,
1634 u_int8_t * txd_popts)
1636 struct ixgb_context_desc *TXD;
1637 struct ixgb_buffer *tx_buffer;
1640 if (mp->m_pkthdr.csum_flags) {
1642 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1643 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1644 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1647 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1648 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1649 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1650 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1653 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1664 * If we reach this point, the checksum offload context needs to be
1667 curr_txd = adapter->next_avail_tx_desc;
1668 tx_buffer = &adapter->tx_buffer_area[curr_txd];
1669 TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
/* Checksum start: just past the Ethernet + IPv4 headers (fixed-size IP assumed) */
1672 TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
/* Checksum-field offset depends on protocol: TCP th_sum vs UDP uh_sum */
1677 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1679 ENET_HEADER_SIZE + sizeof(struct ip) +
1680 offsetof(struct tcphdr, th_sum);
1681 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1683 ENET_HEADER_SIZE + sizeof(struct ip) +
1684 offsetof(struct udphdr, uh_sum);
1686 TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
/* Context descriptors carry no mbuf */
1688 tx_buffer->m_head = NULL;
/* Advance the ring index (wrapping) and account for the consumed slot */
1690 if (++curr_txd == adapter->num_tx_desc)
1693 adapter->num_tx_desc_avail--;
1694 adapter->next_avail_tx_desc = curr_txd;
1698 /**********************************************************************
1700 * Examine each tx_buffer in the used queue. If the hardware is done
1701 * processing the packet then free associated resources. The
1702 * tx_buffer is put back on the free queue.
1704 **********************************************************************/
/*
 * ixgb_clean_transmit_interrupts: reclaim TX descriptors the hardware
 * has finished with (DD bit set).  For each completed descriptor the
 * associated mbuf and DMA map are released.  Afterwards, if enough
 * descriptors are free, IFF_DRV_OACTIVE is cleared and the watchdog
 * timer is reset or restarted.  Caller must hold the adapter lock.
 */
1706 ixgb_clean_transmit_interrupts(struct adapter * adapter)
1709 struct ixgb_buffer *tx_buffer;
1710 struct ixgb_tx_desc *tx_desc;
1712 IXGB_LOCK_ASSERT(adapter);
/* Nothing to clean if the ring is already fully free */
1714 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1718 adapter->clean_tx_interrupts++;
1720 num_avail = adapter->num_tx_desc_avail;
1721 i = adapter->oldest_used_tx_desc;
1723 tx_buffer = &adapter->tx_buffer_area[i];
1724 tx_desc = &adapter->tx_desc_base[i];
/* Walk forward while the hardware reports Descriptor Done */
1726 while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1728 tx_desc->status = 0;
1731 if (tx_buffer->m_head) {
1732 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1733 BUS_DMASYNC_POSTWRITE);
1734 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1735 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1736 m_freem(tx_buffer->m_head);
1737 tx_buffer->m_head = NULL;
1739 if (++i == adapter->num_tx_desc)
1742 tx_buffer = &adapter->tx_buffer_area[i];
1743 tx_desc = &adapter->tx_desc_base[i];
1746 adapter->oldest_used_tx_desc = i;
1749 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1750 * it is OK to send packets. If there are no pending descriptors,
1751 * clear the timeout. Otherwise, if some descriptors have been freed,
1752 * restart the timeout.
1754 if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1755 struct ifnet *ifp = adapter->ifp;
1757 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1758 if (num_avail == adapter->num_tx_desc)
1759 adapter->tx_timer = 0;
1760 else if (num_avail == adapter->num_tx_desc_avail)
1761 adapter->tx_timer = IXGB_TX_TIMEOUT;
1763 adapter->num_tx_desc_avail = num_avail;
1768 /*********************************************************************
1770 * Get a buffer from system mbuf buffer pool.
1772 **********************************************************************/
/*
 * ixgb_get_buf: attach an mbuf cluster to RX descriptor slot 'i'.
 * If 'nmp' is NULL a fresh cluster is allocated (failure bumps
 * mbuf_alloc_failed); otherwise the caller-supplied mbuf is recycled.
 * The cluster is DMA-loaded and its bus address written into the RX
 * descriptor (little-endian).
 */
1774 ixgb_get_buf(int i, struct adapter * adapter,
1777 register struct mbuf *mp = nmp;
1778 struct ixgb_buffer *rx_buffer;
1787 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1790 adapter->mbuf_alloc_failed++;
1793 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* Recycled-mbuf path: reset length and data pointer to cluster start */
1795 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1796 mp->m_data = mp->m_ext.ext_buf;
/* Standard MTU: shift payload by ETHER_ALIGN so the IP header is aligned */
1800 if (ifp->if_mtu <= ETHERMTU) {
1801 m_adj(mp, ETHER_ALIGN);
1803 rx_buffer = &adapter->rx_buffer_area[i];
1806 * Using memory from the mbuf cluster pool, invoke the bus_dma
1807 * machinery to arrange the memory mapping.
1809 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1810 mtod(mp, void *), mp->m_len,
1811 ixgb_dmamap_cb, &paddr, 0);
1816 rx_buffer->m_head = mp;
1817 adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1818 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1823 /*********************************************************************
1825 * Allocate memory for rx_buffer structures. Since we use one
1826 * rx_buffer per received packet, the maximum number of rx_buffer's
1827 * that we'll need is equal to the number of receive descriptors
1828 * that we've allocated.
1830 **********************************************************************/
/*
 * ixgb_allocate_receive_structures: allocate the rx_buffer array (one
 * per RX descriptor), create the RX DMA tag and a DMA map per buffer,
 * then pre-fill every descriptor with a fresh cluster via
 * ixgb_get_buf().  The trailing lines are the error-unwind path:
 * destroy the tag and free the array.
 */
1832 ixgb_allocate_receive_structures(struct adapter * adapter)
1835 struct ixgb_buffer *rx_buffer;
1837 if (!(adapter->rx_buffer_area =
1838 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1839 adapter->num_rx_desc, M_DEVBUF,
1840 M_NOWAIT | M_ZERO))) {
1841 device_printf(adapter->dev,
1842 "Unable to allocate rx_buffer memory\n");
/* NOTE(review): redundant -- M_ZERO above already zeroed the area */
1845 bzero(adapter->rx_buffer_area,
1846 sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1848 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1849 PAGE_SIZE, 0, /* alignment, bounds */
1850 BUS_SPACE_MAXADDR, /* lowaddr */
1851 BUS_SPACE_MAXADDR, /* highaddr */
1852 NULL, NULL, /* filter, filterarg */
1853 MCLBYTES, /* maxsize */
1855 MCLBYTES, /* maxsegsize */
1856 BUS_DMA_ALLOCNOW, /* flags */
1857 #if __FreeBSD_version >= 502000
1858 NULL, /* lockfunc */
1859 NULL, /* lockfuncarg */
1863 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1864 "bus_dma_tag_create failed; error %u\n",
1868 rx_buffer = adapter->rx_buffer_area;
/* One DMA map per receive buffer */
1869 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1870 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1873 device_printf(adapter->dev,
1874 "ixgb_allocate_receive_structures: "
1875 "bus_dmamap_create failed; error %u\n",
/* Populate every slot with an mbuf cluster */
1881 for (i = 0; i < adapter->num_rx_desc; i++) {
1882 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1883 adapter->rx_buffer_area[i].m_head = NULL;
1884 adapter->rx_desc_base[i].buff_addr = 0;
/* Error unwind: destroy tag and release the buffer array */
1891 bus_dma_tag_destroy(adapter->rxtag);
1893 adapter->rxtag = NULL;
1894 free(adapter->rx_buffer_area, M_DEVBUF);
1895 adapter->rx_buffer_area = NULL;
1899 /*********************************************************************
1901 * Allocate and initialize receive structures.
1903 **********************************************************************/
/*
 * ixgb_setup_receive_structures: zero the RX descriptor ring, allocate
 * the receive buffers, and reset the software ring indices.
 */
1905 ixgb_setup_receive_structures(struct adapter * adapter)
1907 bzero((void *)adapter->rx_desc_base,
1908 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1910 if (ixgb_allocate_receive_structures(adapter))
1913 /* Setup our descriptor pointers */
1914 adapter->next_rx_desc_to_check = 0;
1915 adapter->next_rx_desc_to_use = 0;
1919 /*********************************************************************
1921 * Enable receive unit.
1923 **********************************************************************/
/*
 * ixgb_initialize_receive_unit: program the hardware RX registers --
 * disable receives while configuring, set interrupt-delay, descriptor
 * ring base/length and head/tail, RXDCTL thresholds, optional RAIDC
 * interrupt mitigation, RX checksum offload, buffer size in RCTL --
 * then re-enable receives.
 */
1925 ixgb_initialize_receive_unit(struct adapter * adapter)
1928 u_int32_t reg_rxcsum;
1929 u_int32_t reg_rxdctl;
1931 u_int64_t rdba = adapter->rxdma.dma_paddr;
1936 * Make sure receives are disabled while setting up the descriptor
1939 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1940 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1942 /* Set the Receive Delay Timer Register */
1943 IXGB_WRITE_REG(&adapter->hw, RDTR,
1944 adapter->rx_int_delay);
1947 /* Setup the Base and Length of the Rx Descriptor Ring */
1948 IXGB_WRITE_REG(&adapter->hw, RDBAL,
1949 (rdba & 0x00000000ffffffffULL));
1950 IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1951 IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1952 sizeof(struct ixgb_rx_desc));
1954 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1955 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
/* Tail one behind head: all descriptors are owned by hardware */
1957 IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
/* RX descriptor control: write-back / host / prefetch thresholds */
1961 reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1962 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1963 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1964 IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
/* Optional receive-adaptive interrupt mitigation (RAIDC) */
1968 if (adapter->raidc) {
1970 uint8_t poll_threshold;
1971 #define IXGB_RAIDC_POLL_DEFAULT 120
/* Poll threshold derived from ring size, capped to 6 bits */
1973 poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1974 poll_threshold >>= 1;
1975 poll_threshold &= 0x3F;
1976 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1977 (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1978 (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1980 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1982 /* Enable Receive Checksum Offload for TCP and UDP ? */
1983 if (ifp->if_capenable & IFCAP_RXCSUM) {
1984 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1985 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1986 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1988 /* Setup the Receive Control Register */
1989 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1990 reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1991 reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1993 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
/* Select hardware buffer size matching the software rx_buffer_len */
1995 switch (adapter->rx_buffer_len) {
1997 case IXGB_RXBUFFER_2048:
1998 reg_rctl |= IXGB_RCTL_BSIZE_2048;
2000 case IXGB_RXBUFFER_4096:
2001 reg_rctl |= IXGB_RCTL_BSIZE_4096;
2003 case IXGB_RXBUFFER_8192:
2004 reg_rctl |= IXGB_RCTL_BSIZE_8192;
2006 case IXGB_RXBUFFER_16384:
2007 reg_rctl |= IXGB_RCTL_BSIZE_16384;
2011 reg_rctl |= IXGB_RCTL_RXEN;
2014 /* Enable Receives */
2015 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2020 /*********************************************************************
2022 * Free receive related data structures.
2024 **********************************************************************/
/*
 * ixgb_free_receive_structures: tear down everything built by the RX
 * allocation path -- per-buffer DMA maps, pending mbufs, the rx_buffer
 * array, and the RX DMA tag.  NULL-guarded for partial-setup safety.
 */
2026 ixgb_free_receive_structures(struct adapter * adapter)
2028 struct ixgb_buffer *rx_buffer;
2031 INIT_DEBUGOUT("free_receive_structures: begin");
2033 if (adapter->rx_buffer_area != NULL) {
2034 rx_buffer = adapter->rx_buffer_area;
2035 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2036 if (rx_buffer->map != NULL) {
2037 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2038 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2040 if (rx_buffer->m_head != NULL)
2041 m_freem(rx_buffer->m_head);
2042 rx_buffer->m_head = NULL;
2045 if (adapter->rx_buffer_area != NULL) {
2046 free(adapter->rx_buffer_area, M_DEVBUF);
2047 adapter->rx_buffer_area = NULL;
2049 if (adapter->rxtag != NULL) {
2050 bus_dma_tag_destroy(adapter->rxtag);
2051 adapter->rxtag = NULL;
2056 /*********************************************************************
2058 * This routine executes in interrupt context. It replenishes
2059 * the mbufs in the descriptor and sends data which has been
2060 * dma'ed into host memory to upper layer.
2062 * We loop at most count times if count is > 0, or until done if
2065 *********************************************************************/
/*
 * ixgb_process_receive_interrupts: main RX path, runs in interrupt
 * context.  Walks completed (DD) descriptors, chains multi-descriptor
 * frames via adapter->fmp/lmp, hands good frames to the stack, drops
 * errored ones, and finally replenishes descriptors and advances the
 * hardware tail pointer.  Loops at most 'count' iterations when
 * count > 0.  Caller must hold the adapter lock (dropped around
 * if_input and re-acquired -- reacquisition is outside this excerpt).
 */
2067 ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2071 #if __FreeBSD_version < 500000
2072 struct ether_header *eh;
2076 u_int8_t accept_frame = 0;
2078 int next_to_use = 0;
2081 /* Pointer to the receive descriptor being examined. */
2082 struct ixgb_rx_desc *current_desc;
2084 IXGB_LOCK_ASSERT(adapter);
2087 i = adapter->next_rx_desc_to_check;
2088 next_to_use = adapter->next_rx_desc_to_use;
2089 eop_desc = adapter->next_rx_desc_to_check;
2090 current_desc = &adapter->rx_desc_base[i];
/* Fast exit when the next descriptor has not been written back yet */
2092 if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2094 adapter->no_pkts_avail++;
2098 while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2100 mp = adapter->rx_buffer_area[i].m_head;
2101 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2102 BUS_DMASYNC_POSTREAD);
2104 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2110 len = current_desc->length;
/* Any hardware-reported error (CRC, symbol, parity, RX) drops the frame */
2112 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2113 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2114 IXGB_RX_DESC_ERRORS_RXE)) {
2119 /* Assign correct length to the current fragment */
2122 if (adapter->fmp == NULL) {
2123 mp->m_pkthdr.len = len;
2124 adapter->fmp = mp; /* Store the first mbuf */
2127 /* Chain mbuf's together */
2128 mp->m_flags &= ~M_PKTHDR;
2129 adapter->lmp->m_next = mp;
2130 adapter->lmp = adapter->lmp->m_next;
2131 adapter->fmp->m_pkthdr.len += len;
2136 adapter->fmp->m_pkthdr.rcvif = ifp;
2138 #if __FreeBSD_version < 500000
/* Pre-5.0 path: strip the ethernet header and use ether_input() */
2139 eh = mtod(adapter->fmp, struct ether_header *);
2141 /* Remove ethernet header from mbuf */
2142 m_adj(adapter->fmp, sizeof(struct ether_header));
2143 ixgb_receive_checksum(adapter, current_desc,
2146 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2147 VLAN_INPUT_TAG(eh, adapter->fmp,
2148 current_desc->special);
2150 ether_input(ifp, eh, adapter->fmp);
2152 ixgb_receive_checksum(adapter, current_desc,
2154 #if __FreeBSD_version < 700000
2155 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2156 VLAN_INPUT_TAG(ifp, adapter->fmp,
2157 current_desc->special);
/* 7.x+: record the VLAN tag in the mbuf packet header */
2159 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2160 adapter->fmp->m_pkthdr.ether_vtag =
2161 current_desc->special;
2162 adapter->fmp->m_flags |= M_VLANTAG;
/* Hand the frame to the stack with the adapter lock dropped */
2166 if (adapter->fmp != NULL) {
2167 IXGB_UNLOCK(adapter);
2168 (*ifp->if_input) (ifp, adapter->fmp);
2173 adapter->fmp = NULL;
2174 adapter->lmp = NULL;
2176 adapter->rx_buffer_area[i].m_head = NULL;
/* Drop path: free any partially-assembled chain */
2178 adapter->dropped_pkts++;
2179 if (adapter->fmp != NULL)
2180 m_freem(adapter->fmp);
2181 adapter->fmp = NULL;
2182 adapter->lmp = NULL;
2185 /* Zero out the receive descriptors status */
2186 current_desc->status = 0;
2188 /* Advance our pointers to the next descriptor */
2189 if (++i == adapter->num_rx_desc) {
2191 current_desc = adapter->rx_desc_base;
2195 adapter->next_rx_desc_to_check = i;
2198 i = (adapter->num_rx_desc - 1);
2201 * 82597EX: Workaround for redundant write back in receive descriptor ring (causes
2202 * memory corruption). Avoid using and re-submitting the most recently received RX
2203 * descriptor back to hardware.
2205 * if(Last written back descriptor == EOP bit set descriptor)
2206 * then avoid re-submitting the most recently received RX descriptor
2208 * if(Last written back descriptor != EOP bit set descriptor)
2209 * then avoid re-submitting the most recently received RX descriptors
2210 * till last EOP bit set descriptor.
2212 if (eop_desc != i) {
2213 if (++eop_desc == adapter->num_rx_desc)
2217 /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
2218 while (next_to_use != i) {
2219 current_desc = &adapter->rx_desc_base[next_to_use];
/* Errored slots recycle their existing mbuf; clean slots get a fresh one */
2220 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2221 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2222 IXGB_RX_DESC_ERRORS_RXE))) {
2223 mp = adapter->rx_buffer_area[next_to_use].m_head;
2224 ixgb_get_buf(next_to_use, adapter, mp);
2226 if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2229 /* Advance our pointers to the next descriptor */
2230 if (++next_to_use == adapter->num_rx_desc) {
2232 current_desc = adapter->rx_desc_base;
2236 adapter->next_rx_desc_to_use = next_to_use;
/* Tail is written one behind next_to_use (wraps at ring start) */
2237 if (--next_to_use < 0)
2238 next_to_use = (adapter->num_rx_desc - 1);
2239 /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
2240 IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2245 /*********************************************************************
2247 * Verify that the hardware indicated that the checksum is valid.
2248 * Inform the stack about the status of checksum so that stack
2249 * doesn't spend time verifying the checksum.
2251 *********************************************************************/
/*
 * ixgb_receive_checksum: translate the hardware's RX checksum status
 * bits into mbuf csum_flags so the stack can skip software
 * verification.  If the hardware ignored the checksum (IXSM) the
 * flags are cleared and nothing is claimed.
 */
2253 ixgb_receive_checksum(struct adapter * adapter,
2254 struct ixgb_rx_desc * rx_desc,
2257 if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2258 mp->m_pkthdr.csum_flags = 0;
2261 if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2263 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2264 /* IP Checksum Good */
2265 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2266 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* IP checksum failed: clear whatever was set */
2269 mp->m_pkthdr.csum_flags = 0;
2272 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2274 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2275 mp->m_pkthdr.csum_flags |=
2276 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2277 mp->m_pkthdr.csum_data = htons(0xffff);
/*
 * ixgb_enable_vlans: set the VLAN Mode Enable bit in CTRL0 so the
 * hardware recognizes/strips 802.1Q tags.
 */
2285 ixgb_enable_vlans(struct adapter * adapter)
2289 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2290 ctrl |= IXGB_CTRL0_VME;
2291 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
/*
 * ixgb_enable_intr: unmask the interrupt causes the driver services
 * (RX timer, TX writeback, RX min-threshold, link change, RX overrun).
 */
2298 ixgb_enable_intr(struct adapter * adapter)
2300 IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2301 IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
/*
 * ixgb_disable_intr: mask all interrupt causes by writing all-ones to
 * the Interrupt Mask Clear register.
 */
2306 ixgb_disable_intr(struct adapter * adapter)
2308 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
/*
 * ixgb_write_pci_cfg: shared-code callback -- write a value to PCI
 * config space at offset 'reg', using the device_t stashed in the
 * osdep back-pointer.
 */
2313 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2317 pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2321 /**********************************************************************
2323 * Update the board statistics counters.
2325 **********************************************************************/
/*
 * ixgb_update_stats_counters: accumulate the hardware statistics
 * registers (clear-on-read, so each read is added to the running
 * totals in adapter->stats) and then mirror the relevant totals into
 * the ifnet counters for the OS.
 */
2327 ixgb_update_stats_counters(struct adapter * adapter)
/* RX good/broadcast/multicast packet and octet counters (low/high halves) */
2331 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2332 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2333 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2334 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2335 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2336 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2337 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2338 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2339 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2340 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
/* Error, flow-control and TX counters */
2342 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2343 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2344 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2345 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2346 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2347 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2348 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2349 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2350 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2351 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2352 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2353 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2354 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2355 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2356 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2357 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2358 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2359 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2360 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2361 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2362 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2363 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2364 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2365 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2366 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2367 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2368 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
/* Unicast/VLAN/jumbo counters and miscellaneous error counters */
2370 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2371 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2372 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2373 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2374 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2375 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2376 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2377 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2378 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2379 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2380 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2381 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2382 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2383 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2384 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2385 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2386 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2387 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2388 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2389 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2390 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2391 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2395 /* Fill out the OS statistics structure */
2396 ifp->if_ipackets = adapter->stats.gprcl;
2397 ifp->if_opackets = adapter->stats.gptcl;
2398 ifp->if_ibytes = adapter->stats.gorcl;
2399 ifp->if_obytes = adapter->stats.gotcl;
2400 ifp->if_imcasts = adapter->stats.mprcl;
2401 ifp->if_collisions = 0;
/* Input-error total: software drops plus hardware-detected errors */
2405 adapter->dropped_pkts +
2406 adapter->stats.crcerrs +
2407 adapter->stats.rnbc +
2408 adapter->stats.mpc +
2409 adapter->stats.rlec;
2415 /**********************************************************************
2417 * This routine is called only when ixgb_display_debug_stats is enabled.
2418 * This routine provides a way to take a look at important statistics
2419 * maintained by the driver and hardware.
2421 **********************************************************************/
/*
 * NOTE(review): this listing is missing several original source lines --
 * the "static void" storage-class line (2422), the opening brace (2424),
 * and what is presumably the "device_t dev = adapter->dev;" local used by
 * every device_printf() below (2428-2431). Confirm against the full
 * if_ixgb.c before relying on this excerpt.
 */
2423 ixgb_print_hw_stats(struct adapter * adapter)
/* Scratch buffers for the decoded PCI bus speed/type strings. */
2425 char buf_speed[100], buf_type[100];
2426 ixgb_bus_speed bus_speed;
2427 ixgb_bus_type bus_type;
/* Driver-maintained (software) receive/transmit bookkeeping counters. */
2432 device_printf(dev, "Packets not Avail = %ld\n",
2433 adapter->no_pkts_avail);
2434 device_printf(dev, "CleanTxInterrupts = %ld\n",
2435 adapter->clean_tx_interrupts);
/* Saved interrupt-cause (ICR) event counts: rx descriptor minimum
 * threshold, rx overrun, rx timer, and tx descriptor written-back. */
2436 device_printf(dev, "ICR RXDMT0 = %lld\n",
2437 (long long)adapter->sv_stats.icr_rxdmt0);
2438 device_printf(dev, "ICR RXO = %lld\n",
2439 (long long)adapter->sv_stats.icr_rxo);
2440 device_printf(dev, "ICR RXT0 = %lld\n",
2441 (long long)adapter->sv_stats.icr_rxt0);
2442 device_printf(dev, "ICR TXDW = %lld\n",
2443 (long long)adapter->sv_stats.icr_TXDW);
/* Decode the PCI bus speed/type recorded in adapter->hw.bus --
 * presumably filled in during hardware init; confirm with ixgb_hw.c.
 * NOTE(review): the listing drops the lines between 2451-2454 and
 * 2454-2458 that carried the fall-through "UNKNOWN" strings and the
 * snprintf/strcpy into buf_speed/buf_type. */
2446 bus_speed = adapter->hw.bus.speed;
2447 bus_type = adapter->hw.bus.type;
2449 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2450 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2451 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2452 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2454 device_printf(dev, "PCI_Bus_Speed = %s\n",
2458 bus_type == ixgb_bus_type_pci ? "PCI" :
2459 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2461 device_printf(dev, "PCI_Bus_Type = %s\n",
/* Transmit-path resource exhaustion and mbuf allocation failures. */
2464 device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2465 adapter->no_tx_desc_avail1);
2466 device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2467 adapter->no_tx_desc_avail2);
2468 device_printf(dev, "Std Mbuf Failed = %ld\n",
2469 adapter->mbuf_alloc_failed);
2470 device_printf(dev, "Std Cluster Failed = %ld\n",
2471 adapter->mbuf_cluster_failed);
/* Hardware MAC statistics, accumulated elsewhere into adapter->stats
 * (see the IXGB_READ_REG accumulation earlier in this file). */
2473 device_printf(dev, "Defer count = %lld\n",
2474 (long long)adapter->stats.dc);
2475 device_printf(dev, "Missed Packets = %lld\n",
2476 (long long)adapter->stats.mpc);
2477 device_printf(dev, "Receive No Buffers = %lld\n",
2478 (long long)adapter->stats.rnbc);
2479 device_printf(dev, "Receive length errors = %lld\n",
2480 (long long)adapter->stats.rlec);
2481 device_printf(dev, "Crc errors = %lld\n",
2482 (long long)adapter->stats.crcerrs);
2483 device_printf(dev, "Driver dropped packets = %ld\n",
2484 adapter->dropped_pkts);
/* Flow-control pause frames received/transmitted. */
2486 device_printf(dev, "XON Rcvd = %lld\n",
2487 (long long)adapter->stats.xonrxc);
2488 device_printf(dev, "XON Xmtd = %lld\n",
2489 (long long)adapter->stats.xontxc);
2490 device_printf(dev, "XOFF Rcvd = %lld\n",
2491 (long long)adapter->stats.xoffrxc);
2492 device_printf(dev, "XOFF Xmtd = %lld\n",
2493 (long long)adapter->stats.xofftxc);
/* Good (error-free) packet counts, then jumbo-frame counts. */
2495 device_printf(dev, "Good Packets Rcvd = %lld\n",
2496 (long long)adapter->stats.gprcl);
2497 device_printf(dev, "Good Packets Xmtd = %lld\n",
2498 (long long)adapter->stats.gptcl);
2500 device_printf(dev, "Jumbo frames recvd = %lld\n",
2501 (long long)adapter->stats.jprcl);
2502 device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2503 (long long)adapter->stats.jptcl);
/* NOTE(review): the function's closing brace (original ~2504-2505) is
 * absent from this listing. */
2510 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2514 struct adapter *adapter;
2517 error = sysctl_handle_int(oidp, &result, 0, req);
2519 if (error || !req->newptr)
2523 adapter = (struct adapter *) arg1;
2524 ixgb_print_hw_stats(adapter);