1 /*******************************************************************************
3 Copyright (c) 2001-2004, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
40 #include <dev/ixgb/if_ixgb.h>
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
45 int ixgb_display_debug_stats = 0;
47 /*********************************************************************
48 * Linked list of board private structures for all NICs found
49 *********************************************************************/
/* NOTE(review): doubly-linked via adapter->next/prev; maintained in attach/detach. */
51 struct adapter *ixgb_adapter_list = NULL;
55 /*********************************************************************
57 *********************************************************************/
/* Version is appended to the probe description string; copyright is printed at attach. */
59 char ixgb_driver_version[] = "1.0.6";
60 char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
62 /*********************************************************************
65 * Used by probe to select devices to load on
66 * Last field stores an index into ixgb_strings
67 * Last entry must be all 0s
69 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70 *********************************************************************/
72 static ixgb_vendor_info_t ixgb_vendor_info_array[] =
74 /* Intel(R) PRO/10000 Network Connection */
75 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76 {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77 /* required last entry */
81 /*********************************************************************
82 * Table of branding strings for all supported NICs.
83 *********************************************************************/
/* Index 0 in the vendor table rows above selects this branding string in probe. */
85 static char *ixgb_strings[] = {
86 "Intel(R) PRO/10GbE Network Driver"
89 /*********************************************************************
91 *********************************************************************/
/* Forward declarations for all file-local (static) entry points and helpers. */
92 static int ixgb_probe(device_t);
93 static int ixgb_attach(device_t);
94 static int ixgb_detach(device_t);
95 static int ixgb_shutdown(device_t);
96 static void ixgb_intr(void *);
97 static void ixgb_start(struct ifnet *);
98 static void ixgb_start_locked(struct ifnet *);
99 static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static void ixgb_watchdog(struct adapter *);
101 static void ixgb_init(void *);
102 static void ixgb_init_locked(struct adapter *);
103 static void ixgb_stop(void *);
104 static void ixgb_media_status(struct ifnet *, struct ifmediareq *);
105 static int ixgb_media_change(struct ifnet *);
106 static void ixgb_identify_hardware(struct adapter *);
107 static int ixgb_allocate_pci_resources(struct adapter *);
108 static void ixgb_free_pci_resources(struct adapter *);
109 static void ixgb_local_timer(void *);
110 static int ixgb_hardware_init(struct adapter *);
111 static int ixgb_setup_interface(device_t, struct adapter *);
112 static int ixgb_setup_transmit_structures(struct adapter *);
113 static void ixgb_initialize_transmit_unit(struct adapter *);
114 static int ixgb_setup_receive_structures(struct adapter *);
115 static void ixgb_initialize_receive_unit(struct adapter *);
116 static void ixgb_enable_intr(struct adapter *);
117 static void ixgb_disable_intr(struct adapter *);
118 static void ixgb_free_transmit_structures(struct adapter *);
119 static void ixgb_free_receive_structures(struct adapter *);
120 static void ixgb_update_stats_counters(struct adapter *);
121 static void ixgb_clean_transmit_interrupts(struct adapter *);
122 static int ixgb_allocate_receive_structures(struct adapter *);
123 static int ixgb_allocate_transmit_structures(struct adapter *);
124 static int ixgb_process_receive_interrupts(struct adapter *, int);
126 ixgb_receive_checksum(struct adapter *,
127 struct ixgb_rx_desc * rx_desc,
130 ixgb_transmit_checksum_setup(struct adapter *,
133 static void ixgb_set_promisc(struct adapter *);
134 static void ixgb_disable_promisc(struct adapter *);
135 static void ixgb_set_multi(struct adapter *);
136 static void ixgb_print_hw_stats(struct adapter *);
137 static void ixgb_print_link_status(struct adapter *);
139 ixgb_get_buf(int i, struct adapter *,
141 static void ixgb_enable_vlans(struct adapter * adapter);
142 static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143 static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
145 ixgb_dma_malloc(struct adapter *, bus_size_t,
146 struct ixgb_dma_alloc *, int);
147 static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148 #ifdef DEVICE_POLLING
/* Polling handler is only compiled in when DEVICE_POLLING is configured. */
149 static poll_handler_t ixgb_poll;
152 /*********************************************************************
153 * FreeBSD Device Interface Entry Points
154 *********************************************************************/
156 static device_method_t ixgb_methods[] = {
157 /* Device interface */
158 DEVMETHOD(device_probe, ixgb_probe),
159 DEVMETHOD(device_attach, ixgb_attach),
160 DEVMETHOD(device_detach, ixgb_detach),
161 DEVMETHOD(device_shutdown, ixgb_shutdown),
165 static driver_t ixgb_driver = {
166 "ixgb", ixgb_methods, sizeof(struct adapter),
169 static devclass_t ixgb_devclass;
/* Register the driver on the pci bus; depend on pci and ether modules. */
170 DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
172 MODULE_DEPEND(ixgb, pci, 1, 1, 1);
173 MODULE_DEPEND(ixgb, ether, 1, 1, 1);
175 /* some defines for controlling descriptor fetches in h/w */
176 #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
177 #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
178 * pushed this many descriptors from
180 #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */
183 /*********************************************************************
184 * Device identification routine
186 * ixgb_probe determines if the driver should be loaded on
187 * adapter based on PCI vendor/device id of the adapter.
189 * return 0 on success, positive on failure
190 *********************************************************************/
193 ixgb_probe(device_t dev)
195 ixgb_vendor_info_t *ent;
197 u_int16_t pci_vendor_id = 0;
198 u_int16_t pci_device_id = 0;
199 u_int16_t pci_subvendor_id = 0;
200 u_int16_t pci_subdevice_id = 0;
201 char adapter_name[60];
203 INIT_DEBUGOUT("ixgb_probe: begin");
/* Cheap early reject: bail out unless the vendor id matches Intel's. */
205 pci_vendor_id = pci_get_vendor(dev);
206 if (pci_vendor_id != IXGB_VENDOR_ID)
209 pci_device_id = pci_get_device(dev);
210 pci_subvendor_id = pci_get_subvendor(dev);
211 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the vendor table until the all-zero sentinel entry. */
213 ent = ixgb_vendor_info_array;
214 while (ent->vendor_id != 0) {
215 if ((pci_vendor_id == ent->vendor_id) &&
216 (pci_device_id == ent->device_id) &&
218 ((pci_subvendor_id == ent->subvendor_id) ||
219 (ent->subvendor_id == PCI_ANY_ID)) &&
221 ((pci_subdevice_id == ent->subdevice_id) ||
222 (ent->subdevice_id == PCI_ANY_ID))) {
/* Match: build "<branding string>, Version - <version>" as the description. */
223 sprintf(adapter_name, "%s, Version - %s",
224 ixgb_strings[ent->index],
225 ixgb_driver_version);
226 device_set_desc_copy(dev, adapter_name);
227 return (BUS_PROBE_DEFAULT);
235 /*********************************************************************
236 * Device initialization routine
238 * The attach entry point is called when the driver is being loaded.
239 * This routine identifies the type of hardware, allocates all resources
240 * and initializes the hardware.
242 * return 0 on success, positive on failure
243 *********************************************************************/
246 ixgb_attach(device_t dev)
248 struct adapter *adapter;
252 device_printf(dev, "%s\n", ixgb_copyright);
253 INIT_DEBUGOUT("ixgb_attach: begin");
255 /* Allocate, clear, and link in our adapter structure */
256 if (!(adapter = device_get_softc(dev))) {
257 device_printf(dev, "adapter structure allocation failed\n");
260 bzero(adapter, sizeof(struct adapter));
262 adapter->osdep.dev = dev;
263 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Link this softc onto the head of the global adapter list. */
265 if (ixgb_adapter_list != NULL)
266 ixgb_adapter_list->prev = adapter;
267 adapter->next = ixgb_adapter_list;
268 ixgb_adapter_list = adapter;
/* Export a per-device "stats" sysctl backed by ixgb_sysctl_stats. */
271 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
272 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
273 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
275 ixgb_sysctl_stats, "I", "Statistics");
277 callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
279 /* Determine hardware revision */
280 ixgb_identify_hardware(adapter);
282 /* Parameters (to be read from user) */
283 adapter->num_tx_desc = IXGB_MAX_TXD;
284 adapter->num_rx_desc = IXGB_MAX_RXD;
285 adapter->tx_int_delay = TIDV;
286 adapter->rx_int_delay = RDTR;
287 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
/* Flow-control defaults: watermarks, pause time, xon behaviour. */
289 adapter->hw.fc.high_water = FCRTH;
290 adapter->hw.fc.low_water = FCRTL;
291 adapter->hw.fc.pause_time = FCPAUSE;
292 adapter->hw.fc.send_xon = TRUE;
293 adapter->hw.fc.type = FLOW_CONTROL;
296 /* Set the max frame size assuming standard ethernet sized frames */
297 adapter->hw.max_frame_size =
298 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
300 if (ixgb_allocate_pci_resources(adapter)) {
301 device_printf(dev, "Allocation of PCI resources failed\n");
/* Descriptor rings are rounded up to a 4K multiple for DMA allocation. */
305 tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
306 sizeof(struct ixgb_tx_desc), 4096);
308 /* Allocate Transmit Descriptor ring */
309 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
310 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
314 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
316 rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
317 sizeof(struct ixgb_rx_desc), 4096);
319 /* Allocate Receive Descriptor ring */
320 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
321 device_printf(dev, "Unable to allocate rx_desc memory\n");
325 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
327 /* Allocate multicast array memory. */
328 adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
329 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
330 if (adapter->mta == NULL) {
331 device_printf(dev, "Can not allocate multicast setup array\n");
336 /* Initialize the hardware */
337 if (ixgb_hardware_init(adapter)) {
338 device_printf(dev, "Unable to initialize the hardware\n");
342 /* Setup OS specific network interface */
343 if (ixgb_setup_interface(dev, adapter) != 0)
346 /* Initialize statistics */
347 ixgb_clear_hw_cntrs(&adapter->hw);
348 ixgb_update_stats_counters(adapter);
350 INIT_DEBUGOUT("ixgb_attach: end");
/* Error unwind: release resources in reverse order of acquisition. */
354 ixgb_dma_free(adapter, &adapter->rxdma);
356 ixgb_dma_free(adapter, &adapter->txdma);
359 if (adapter->ifp != NULL)
360 if_free(adapter->ifp);
361 ixgb_free_pci_resources(adapter);
362 sysctl_ctx_free(&adapter->sysctl_ctx);
363 free(adapter->mta, M_DEVBUF);
368 /*********************************************************************
369 * Device removal routine
371 * The detach entry point is called when the driver is being removed.
372 * This routine stops the adapter and deallocates all the resources
373 * that were allocated for driver operation.
375 * return 0 on success, positive on failure
376 *********************************************************************/
379 ixgb_detach(device_t dev)
381 struct adapter *adapter = device_get_softc(dev);
382 struct ifnet *ifp = adapter->ifp;
384 INIT_DEBUGOUT("ixgb_detach: begin");
386 #ifdef DEVICE_POLLING
/* Deregister from the polling framework before tearing the interface down. */
387 if (ifp->if_capenable & IFCAP_POLLING)
388 ether_poll_deregister(ifp);
/* Flag detach-in-progress so ioctl paths bail out early. */
392 adapter->in_detach = 1;
395 IXGB_UNLOCK(adapter);
397 #if __FreeBSD_version < 500000
398 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
402 callout_drain(&adapter->timer);
403 ixgb_free_pci_resources(adapter);
404 #if __FreeBSD_version >= 500000
408 /* Free Transmit Descriptor ring */
409 if (adapter->tx_desc_base) {
410 ixgb_dma_free(adapter, &adapter->txdma);
411 adapter->tx_desc_base = NULL;
413 /* Free Receive Descriptor ring */
414 if (adapter->rx_desc_base) {
415 ixgb_dma_free(adapter, &adapter->rxdma);
416 adapter->rx_desc_base = NULL;
418 /* Remove from the adapter list */
419 if (ixgb_adapter_list == adapter)
420 ixgb_adapter_list = adapter->next;
421 if (adapter->next != NULL)
422 adapter->next->prev = adapter->prev;
423 if (adapter->prev != NULL)
424 adapter->prev->next = adapter->next;
425 free(adapter->mta, M_DEVBUF);
427 IXGB_LOCK_DESTROY(adapter);
431 /*********************************************************************
433 * Shutdown entry point
435 **********************************************************************/
438 ixgb_shutdown(device_t dev)
440 struct adapter *adapter = device_get_softc(dev);
/* NOTE(review): the lock/stop call preceding this unlock is not visible here. */
443 IXGB_UNLOCK(adapter);
448 /*********************************************************************
449 * Transmit entry point
451 * ixgb_start is called by the stack to initiate a transmit.
452 * The driver will remain in this routine as long as there are
453 * packets to transmit and transmit resources are available.
454 * In case resources are not available stack is notified and
455 * the packet is requeued.
456 **********************************************************************/
459 ixgb_start_locked(struct ifnet * ifp)
462 struct adapter *adapter = ifp->if_softc;
464 IXGB_LOCK_ASSERT(adapter);
/* Nothing can go out while the link is down. */
466 if (!adapter->link_active)
469 while (ifp->if_snd.ifq_head != NULL) {
470 IF_DEQUEUE(&ifp->if_snd, m_head);
/* On encap failure, mark output-active and requeue the mbuf at the front. */
475 if (ixgb_encap(adapter, m_head)) {
476 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
477 IF_PREPEND(&ifp->if_snd, m_head);
480 /* Send a copy of the frame to the BPF listener */
481 #if __FreeBSD_version < 500000
483 bpf_mtap(ifp, m_head);
485 ETHER_BPF_MTAP(ifp, m_head);
487 /* Set timeout in case hardware has problems transmitting */
488 adapter->tx_timer = IXGB_TX_TIMEOUT;
/* Unlocked wrapper: take the adapter lock and hand off to ixgb_start_locked(). */
495 ixgb_start(struct ifnet *ifp)
497 struct adapter *adapter = ifp->if_softc;
500 ixgb_start_locked(ifp);
501 IXGB_UNLOCK(adapter);
505 /*********************************************************************
508 * ixgb_ioctl is called when the user wants to configure the
511 * return 0 on success, positive on failure
512 **********************************************************************/
515 ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
518 struct ifreq *ifr = (struct ifreq *) data;
519 struct adapter *adapter = ifp->if_softc;
/* Refuse configuration while detach is tearing the device down. */
521 if (adapter->in_detach)
527 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
528 ether_ioctl(ifp, command, data);
531 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
/* Reject MTUs that would exceed the jumbo-frame hardware limit. */
532 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
536 ifp->if_mtu = ifr->ifr_mtu;
537 adapter->hw.max_frame_size =
538 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
540 ixgb_init_locked(adapter);
541 IXGB_UNLOCK(adapter);
545 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
547 if (ifp->if_flags & IFF_UP) {
548 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
549 ixgb_init_locked(adapter);
/* Already running: just refresh promiscuous-mode settings. */
551 ixgb_disable_promisc(adapter);
552 ixgb_set_promisc(adapter);
554 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
558 IXGB_UNLOCK(adapter);
562 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
/* Quiesce interrupts while rewriting the multicast filter table. */
563 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
565 ixgb_disable_intr(adapter);
566 ixgb_set_multi(adapter);
567 ixgb_enable_intr(adapter);
568 IXGB_UNLOCK(adapter);
573 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
574 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
577 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
578 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
579 #ifdef DEVICE_POLLING
580 if (mask & IFCAP_POLLING) {
581 if (ifr->ifr_reqcap & IFCAP_POLLING) {
582 error = ether_poll_register(ixgb_poll, ifp);
/* Polling enabled: hardware interrupts stay masked from here on. */
586 ixgb_disable_intr(adapter);
587 ifp->if_capenable |= IFCAP_POLLING;
588 IXGB_UNLOCK(adapter);
590 error = ether_poll_deregister(ifp);
591 /* Enable interrupt even in error case */
593 ixgb_enable_intr(adapter);
594 ifp->if_capenable &= ~IFCAP_POLLING;
595 IXGB_UNLOCK(adapter);
598 #endif /* DEVICE_POLLING */
599 if (mask & IFCAP_HWCSUM) {
600 if (IFCAP_HWCSUM & ifp->if_capenable)
601 ifp->if_capenable &= ~IFCAP_HWCSUM;
603 ifp->if_capenable |= IFCAP_HWCSUM;
604 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
609 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
617 /*********************************************************************
618 * Watchdog entry point
620 * This routine is called whenever hardware quits transmitting.
622 **********************************************************************/
625 ixgb_watchdog(struct adapter *adapter)
632 * If we are in this routine because of pause frames, then don't
633 * reset the hardware.
/* TXOFF set means transmit is paused by flow control, not hung: rearm and return. */
635 if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
636 adapter->tx_timer = IXGB_TX_TIMEOUT;
639 if_printf(ifp, "watchdog timeout -- resetting\n");
642 ixgb_init_locked(adapter);
650 /*********************************************************************
653 * This routine is used in two ways. It is used by the stack as
654 * init entry point in network interface structure. It is also used
655 * by the driver as a hw/sw initialization routine to get to a
658 * return 0 on success, positive on failure
659 **********************************************************************/
662 ixgb_init_locked(struct adapter *adapter)
666 INIT_DEBUGOUT("ixgb_init: begin");
668 IXGB_LOCK_ASSERT(adapter);
673 /* Get the latest mac address, User can use a LAA */
674 bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
675 IXGB_ETH_LENGTH_OF_ADDRESS);
677 /* Initialize the hardware */
678 if (ixgb_hardware_init(adapter)) {
679 if_printf(ifp, "Unable to initialize the hardware\n");
682 ixgb_enable_vlans(adapter);
684 /* Prepare transmit descriptors and buffers */
685 if (ixgb_setup_transmit_structures(adapter)) {
686 if_printf(ifp, "Could not setup transmit structures\n");
690 ixgb_initialize_transmit_unit(adapter);
692 /* Setup Multicast table */
693 ixgb_set_multi(adapter);
695 /* Prepare receive descriptors and buffers */
696 if (ixgb_setup_receive_structures(adapter)) {
697 if_printf(ifp, "Could not setup receive structures\n");
701 ixgb_initialize_receive_unit(adapter);
703 /* Don't lose promiscuous settings */
704 ixgb_set_promisc(adapter);
/* Mark the interface up and clear the output-stalled flag. */
707 ifp->if_drv_flags |= IFF_DRV_RUNNING;
708 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
711 if (ifp->if_capenable & IFCAP_TXCSUM)
712 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
714 ifp->if_hwassist = 0;
717 /* Enable jumbo frames */
718 if (ifp->if_mtu > ETHERMTU) {
720 IXGB_WRITE_REG(&adapter->hw, MFS,
721 adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
722 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
723 temp_reg |= IXGB_CTRL0_JFE;
724 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
726 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
727 ixgb_clear_hw_cntrs(&adapter->hw);
728 #ifdef DEVICE_POLLING
730 * Only disable interrupts if we are polling, make sure they are on
733 if (ifp->if_capenable & IFCAP_POLLING)
734 ixgb_disable_intr(adapter);
737 ixgb_enable_intr(adapter);
/* ixgb_init: if_init hook — lock, run the real init, unlock.
 * NOTE(review): the function header line is not visible in this extract. */
745 struct adapter *adapter = arg;
748 ixgb_init_locked(adapter);
749 IXGB_UNLOCK(adapter);
753 #ifdef DEVICE_POLLING
/* Locked polling core: service link events, then RX/TX, then restart output. */
755 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
757 struct adapter *adapter = ifp->if_softc;
761 IXGB_LOCK_ASSERT(adapter);
763 if (cmd == POLL_AND_CHECK_STATUS) {
764 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
765 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
766 ixgb_check_for_link(&adapter->hw);
767 ixgb_print_link_status(adapter);
/* count bounds how many RX packets one poll pass may consume. */
770 rx_npkts = ixgb_process_receive_interrupts(adapter, count);
771 ixgb_clean_transmit_interrupts(adapter);
773 if (ifp->if_snd.ifq_head != NULL)
774 ixgb_start_locked(ifp);
/* Polling entry point registered with ether_poll_register(); only runs
 * the locked core while the interface is marked running. */
779 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
781 struct adapter *adapter = ifp->if_softc;
785 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
786 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
787 IXGB_UNLOCK(adapter);
790 #endif /* DEVICE_POLLING */
792 /*********************************************************************
794 * Interrupt Service routine
796 **********************************************************************/
/* Bounded service loop: at most IXGB_MAX_INTR RX/TX cleanup passes per interrupt. */
801 u_int32_t loop_cnt = IXGB_MAX_INTR;
804 struct adapter *adapter = arg;
805 boolean_t rxdmt0 = FALSE;
811 #ifdef DEVICE_POLLING
/* When polling owns the device, the ISR does nothing. */
812 if (ifp->if_capenable & IFCAP_POLLING) {
813 IXGB_UNLOCK(adapter);
/* Reading ICR acknowledges/clears the pending interrupt causes. */
818 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
820 IXGB_UNLOCK(adapter);
824 if (reg_icr & IXGB_INT_RXDMT0)
/* Per-cause counters, kept for the debug statistics output. */
828 if (reg_icr & IXGB_INT_RXDMT0)
829 adapter->sv_stats.icr_rxdmt0++;
830 if (reg_icr & IXGB_INT_RXO)
831 adapter->sv_stats.icr_rxo++;
832 if (reg_icr & IXGB_INT_RXT0)
833 adapter->sv_stats.icr_rxt0++;
834 if (reg_icr & IXGB_INT_TXDW)
835 adapter->sv_stats.icr_TXDW++;
838 /* Link status change */
839 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
840 ixgb_check_for_link(&adapter->hw);
841 ixgb_print_link_status(adapter);
843 while (loop_cnt > 0) {
844 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
845 ixgb_process_receive_interrupts(adapter, -1);
846 ixgb_clean_transmit_interrupts(adapter);
/* Re-arm the RX-descriptor-minimum interrupt if it fired and raidc is set. */
851 if (rxdmt0 && adapter->raidc) {
852 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
853 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
855 if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
856 ixgb_start_locked(ifp);
858 IXGB_UNLOCK(adapter);
863 /*********************************************************************
865 * Media Ioctl callback
867 * This routine is called whenever the user queries the status of
868 * the interface using ifconfig.
870 **********************************************************************/
872 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
874 struct adapter *adapter = ifp->if_softc;
876 INIT_DEBUGOUT("ixgb_media_status: begin");
878 ixgb_check_for_link(&adapter->hw);
879 ixgb_print_link_status(adapter);
881 ifmr->ifm_status = IFM_AVALID;
882 ifmr->ifm_active = IFM_ETHER;
884 if (!adapter->hw.link_up)
/* Link up: this hardware only does 10GbE fiber, reported as 1000_SX full duplex. */
887 ifmr->ifm_status |= IFM_ACTIVE;
888 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
893 /*********************************************************************
895 * Media Ioctl callback
897 * This routine is called when the user changes speed/duplex using
898 * media/mediopt option with ifconfig.
900 **********************************************************************/
902 ixgb_media_change(struct ifnet * ifp)
904 struct adapter *adapter = ifp->if_softc;
905 struct ifmedia *ifm = &adapter->media;
907 INIT_DEBUGOUT("ixgb_media_change: begin");
/* Only ethernet media types are meaningful for this device. */
909 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
915 /*********************************************************************
917 * This routine maps the mbufs to tx descriptors.
919 * return 0 on success, positive on failure
920 **********************************************************************/
923 ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
926 int i, j, error, nsegs;
928 #if __FreeBSD_version < 500000
929 struct ifvlan *ifv = NULL;
931 bus_dma_segment_t segs[IXGB_MAX_SCATTER];
933 struct ixgb_buffer *tx_buffer = NULL;
934 struct ixgb_tx_desc *current_tx_desc = NULL;
935 struct ifnet *ifp = adapter->ifp;
938 * Force a cleanup if number of TX descriptors available hits the
941 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
942 ixgb_clean_transmit_interrupts(adapter);
944 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
945 adapter->no_tx_desc_avail1++;
949 * Map the packet for DMA.
951 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
952 adapter->no_tx_map_avail++;
955 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
956 &nsegs, BUS_DMA_NOWAIT);
958 adapter->no_tx_dma_setup++;
959 if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
960 "error %u\n", error);
961 bus_dmamap_destroy(adapter->txtag, map);
964 KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
/* Not enough free descriptors for this scatter list: give up, caller requeues. */
966 if (nsegs > adapter->num_tx_desc_avail) {
967 adapter->no_tx_desc_avail2++;
968 bus_dmamap_destroy(adapter->txtag, map);
971 if (ifp->if_hwassist > 0) {
972 ixgb_transmit_checksum_setup(adapter, m_head,
977 /* Find out if we are in vlan mode */
978 #if __FreeBSD_version < 500000
979 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
980 m_head->m_pkthdr.rcvif != NULL &&
981 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
982 ifv = m_head->m_pkthdr.rcvif->if_softc;
/* FIX: "#elseif" is not a valid C preprocessor directive; it was silently
 * ignored inside the skipped group, so this branch never compiled. Use #elif. */
983 #elif __FreeBSD_version < 700000
984 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
/* Walk the segment list, filling one TX descriptor per DMA segment. */
986 i = adapter->next_avail_tx_desc;
987 for (j = 0; j < nsegs; j++) {
988 tx_buffer = &adapter->tx_buffer_area[i];
989 current_tx_desc = &adapter->tx_desc_base[i];
991 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
992 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
993 current_tx_desc->popts = txd_popts;
994 if (++i == adapter->num_tx_desc)
997 tx_buffer->m_head = NULL;
1000 adapter->num_tx_desc_avail -= nsegs;
1001 adapter->next_avail_tx_desc = i;
1003 #if __FreeBSD_version < 500000
1005 /* Set the vlan id */
1006 current_tx_desc->vlan = ifv->ifv_tag;
/* FIX: same invalid "#elseif" directive corrected to #elif here. */
1007 #elif __FreeBSD_version < 700000
1009 /* Set the vlan id */
1010 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1012 if (m_head->m_flags & M_VLANTAG) {
1013 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1016 /* Tell hardware to add tag */
1017 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
/* Record the mbuf/map on the last descriptor so TX cleanup can free them. */
1019 tx_buffer->m_head = m_head;
1020 tx_buffer->map = map;
1021 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1024 * Last Descriptor of Packet needs End Of Packet (EOP)
1026 current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1029 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1030 * that this frame is available to transmit.
1032 IXGB_WRITE_REG(&adapter->hw, TDT, i);
/* Propagate IFF_PROMISC / IFF_ALLMULTI interface flags into the RCTL register. */
1038 ixgb_set_promisc(struct adapter * adapter)
1042 struct ifnet *ifp = adapter->ifp;
1044 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1046 if (ifp->if_flags & IFF_PROMISC) {
1047 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1048 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1049 } else if (ifp->if_flags & IFF_ALLMULTI) {
/* Allmulti: accept all multicast but not all unicast. */
1050 reg_rctl |= IXGB_RCTL_MPE;
1051 reg_rctl &= ~IXGB_RCTL_UPE;
1052 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Clear both unicast- and multicast-promiscuous bits in RCTL. */
1058 ixgb_disable_promisc(struct adapter * adapter)
1062 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1064 reg_rctl &= (~IXGB_RCTL_UPE);
1065 reg_rctl &= (~IXGB_RCTL_MPE);
1066 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1072 /*********************************************************************
1075 * This routine is called whenever multicast address list is updated.
1077 **********************************************************************/
1080 ixgb_set_multi(struct adapter * adapter)
1082 u_int32_t reg_rctl = 0;
1084 struct ifmultiaddr *ifma;
1086 struct ifnet *ifp = adapter->ifp;
1088 IOCTL_DEBUGOUT("ixgb_set_multi: begin");
/* Rebuild the flat MAC-address array from scratch each time. */
1091 bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1092 MAX_NUM_MULTICAST_ADDRESSES);
1094 if_maddr_rlock(ifp);
1095 #if __FreeBSD_version < 500000
1096 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1098 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1100 if (ifma->ifma_addr->sa_family != AF_LINK)
1103 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1104 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1107 if_maddr_runlock(ifp);
/* Too many groups for the hardware filter: fall back to multicast-promiscuous. */
1109 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1110 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1111 reg_rctl |= IXGB_RCTL_MPE;
1112 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1114 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1120 /*********************************************************************
1123 * This routine checks for link status and updates statistics.
1125 **********************************************************************/
1128 ixgb_local_timer(void *arg)
1131 struct adapter *adapter = arg;
1134 IXGB_LOCK_ASSERT(adapter);
1136 ixgb_check_for_link(&adapter->hw);
1137 ixgb_print_link_status(adapter);
1138 ixgb_update_stats_counters(adapter);
1139 if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1140 ixgb_print_hw_stats(adapter);
/* Software watchdog: tx_timer was armed in start; fire when it counts to zero. */
1142 if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1143 ixgb_watchdog(adapter);
/* Reschedule ourselves to run again in one second. */
1144 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
/* Log link transitions and mirror hw.link_up into adapter->link_active;
 * prints only on a state change, not on every call. */
1148 ixgb_print_link_status(struct adapter * adapter)
1150 if (adapter->hw.link_up) {
1151 if (!adapter->link_active) {
1152 if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1155 adapter->link_active = 1;
1158 if (adapter->link_active) {
1159 if_printf(adapter->ifp, "Link is Down \n");
1160 adapter->link_active = 0;
1169 /*********************************************************************
1171 * This routine disables all traffic on the adapter by issuing a
1172 * global reset on the MAC and deallocates TX/RX buffers.
1174 **********************************************************************/
1177 ixgb_stop(void *arg)
1180 struct adapter *adapter = arg;
1183 IXGB_LOCK_ASSERT(adapter);
1185 INIT_DEBUGOUT("ixgb_stop: begin\n");
1186 ixgb_disable_intr(adapter);
/* Clear adapter_stopped first so ixgb_adapter_stop() actually performs the reset. */
1187 adapter->hw.adapter_stopped = FALSE;
1188 ixgb_adapter_stop(&adapter->hw);
1189 callout_stop(&adapter->timer);
1190 ixgb_free_transmit_structures(adapter);
1191 ixgb_free_receive_structures(adapter);
1193 /* Tell the stack that the interface is no longer active */
1194 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1195 adapter->tx_timer = 0;
1201 /*********************************************************************
1203 * Determine hardware revision.
1205 **********************************************************************/
1207 ixgb_identify_hardware(struct adapter * adapter)
1209 device_t dev = adapter->dev;
1211 /* Make sure our PCI config space has the necessary stuff set */
1212 pci_enable_busmaster(dev);
1213 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1215 /* Save off the information about this board */
1216 adapter->hw.vendor_id = pci_get_vendor(dev);
1217 adapter->hw.device_id = pci_get_device(dev);
1218 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1219 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1220 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1222 /* Set MacType, etc. based on this PCI info */
1223 switch (adapter->hw.device_id) {
1224 case IXGB_DEVICE_ID_82597EX:
1225 case IXGB_DEVICE_ID_82597EX_SR:
/* Both supported device ids map to the single 82597 MAC type. */
1226 adapter->hw.mac_type = ixgb_82597;
1229 INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
1230 device_printf(dev, "unsupported device id 0x%x\n",
1231 adapter->hw.device_id);
/*
 * ixgb_allocate_pci_resources - map the device's memory BAR and set up
 * the (shareable) IRQ with its interrupt handler.  On success the
 * shared-code back-pointer (hw.back) is wired to the osdep structure.
 * Returns nonzero on failure (error paths not fully visible here).
 */
1238 ixgb_allocate_pci_resources(struct adapter * adapter)
1241 device_t dev = adapter->dev;
1244 adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
1247 if (!(adapter->res_memory)) {
1248 device_printf(dev, "Unable to allocate bus resource: memory\n");
/* Extract bus tag/handle so register access macros can use them. */
1251 adapter->osdep.mem_bus_space_tag =
1252 rman_get_bustag(adapter->res_memory);
1253 adapter->osdep.mem_bus_space_handle =
1254 rman_get_bushandle(adapter->res_memory);
/*
 * NOTE(review): hw_addr is pointed at the bus-space *handle* itself,
 * not a CPU-mapped address; the shared code presumably dereferences it
 * through the osdep macros only — confirm.
 */
1255 adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1258 adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
1260 RF_SHAREABLE | RF_ACTIVE);
1261 if (!(adapter->res_interrupt)) {
1263 "Unable to allocate bus resource: interrupt\n");
/* Register ixgb_intr as the (MPSAFE) interrupt handler. */
1266 if (bus_setup_intr(dev, adapter->res_interrupt,
1267 INTR_TYPE_NET | INTR_MPSAFE,
1268 NULL, (void (*) (void *))ixgb_intr, adapter,
1269 &adapter->int_handler_tag)) {
1270 device_printf(dev, "Error registering interrupt handler!\n");
1273 adapter->hw.back = &adapter->osdep;
/*
 * ixgb_free_pci_resources - release the IRQ (tearing down the handler
 * first), the memory BAR and, if present, the I/O port resource.
 * Each resource is released only if it was actually allocated.
 */
1279 ixgb_free_pci_resources(struct adapter * adapter)
1281 device_t dev = adapter->dev;
1283 if (adapter->res_interrupt != NULL) {
1284 bus_teardown_intr(dev, adapter->res_interrupt,
1285 adapter->int_handler_tag);
1286 bus_release_resource(dev, SYS_RES_IRQ, 0,
1287 adapter->res_interrupt);
1289 if (adapter->res_memory != NULL) {
1290 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1291 adapter->res_memory);
1293 if (adapter->res_ioport != NULL) {
1294 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1295 adapter->res_ioport);
1300 /*********************************************************************
1302 * Initialize the hardware to a configuration as specified by the
1303 * adapter structure. The controller is reset, the EEPROM is
1304 * verified, the MAC address is set, then the shared initialization
1305 * routines are called.
1307 **********************************************************************/
/*
 * ixgb_hardware_init - reset the MAC, validate the EEPROM checksum and
 * run the shared-code hardware init.  Error returns not fully visible
 * in this view; both failure branches print a diagnostic.
 */
1309 ixgb_hardware_init(struct adapter * adapter)
1311 /* Issue a global reset */
1312 adapter->hw.adapter_stopped = FALSE;
1313 ixgb_adapter_stop(&adapter->hw);
1315 /* Make sure we have a good EEPROM before we read from it */
1316 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1317 device_printf(adapter->dev,
1318 "The EEPROM Checksum Is Not Valid\n");
1321 if (!ixgb_init_hw(&adapter->hw)) {
1322 device_printf(adapter->dev, "Hardware Initialization Failed");
1329 /*********************************************************************
1331 * Setup networking device structure and register an interface.
1333 **********************************************************************/
/*
 * ixgb_setup_interface - allocate and populate the ifnet, attach it to
 * the ethernet layer, advertise capabilities (HWCSUM, VLAN, polling)
 * and register the supported media types (1000BASE-SX / autoselect).
 * Version #ifdefs keep the driver buildable on older FreeBSD releases.
 */
1335 ixgb_setup_interface(device_t dev, struct adapter * adapter)
1338 INIT_DEBUGOUT("ixgb_setup_interface: begin");
1340 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1342 device_printf(dev, "can not allocate ifnet structure\n");
1345 #if __FreeBSD_version >= 502000
1346 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
/* Pre-5.2 compatibility: set unit/name fields directly. */
1348 ifp->if_unit = device_get_unit(dev);
1349 ifp->if_name = "ixgb";
1351 ifp->if_mtu = ETHERMTU;
/* 10GbE part, but baudrate field reported as 1G here — TODO confirm intent. */
1352 ifp->if_baudrate = 1000000000;
1353 ifp->if_init = ixgb_init;
1354 ifp->if_softc = adapter;
1355 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1356 ifp->if_ioctl = ixgb_ioctl;
1357 ifp->if_start = ixgb_start;
1358 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1360 #if __FreeBSD_version < 500000
1361 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1363 ether_ifattach(ifp, adapter->hw.curr_mac_addr);
1366 ifp->if_capabilities = IFCAP_HWCSUM;
1369 * Tell the upper layer(s) we support long frames.
1371 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1373 #if __FreeBSD_version >= 500000
1374 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
/* Enable all advertised capabilities by default. */
1377 ifp->if_capenable = ifp->if_capabilities;
1379 #ifdef DEVICE_POLLING
1380 ifp->if_capabilities |= IFCAP_POLLING;
1384 * Specify the media types supported by this adapter and register
1385 * callbacks to update media and link information
1387 ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1389 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1391 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1393 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1394 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1399 /********************************************************************
1400 * Manage DMA'able memory.
1401 *******************************************************************/
/*
 * ixgb_dmamap_cb - bus_dmamap_load() callback: store the physical
 * address of the first (and only expected) segment into *arg.
 */
1403 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1407 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * ixgb_dma_malloc - allocate a page-aligned, contiguous DMA-able
 * region of 'size' bytes for descriptor rings: create a tag, allocate
 * the memory, and load the map to obtain the bus address (delivered
 * via ixgb_dmamap_cb).  On failure the partially-acquired resources
 * are unwound (goto-style cleanup; labels not visible in this view).
 */
1412 ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1413 struct ixgb_dma_alloc * dma, int mapflags)
1419 r = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1420 PAGE_SIZE, 0, /* alignment, bounds */
1421 BUS_SPACE_MAXADDR, /* lowaddr */
1422 BUS_SPACE_MAXADDR, /* highaddr */
1423 NULL, NULL, /* filter, filterarg */
1426 size, /* maxsegsize */
1427 BUS_DMA_ALLOCNOW, /* flags */
1428 #if __FreeBSD_version >= 502000
1429 NULL, /* lockfunc */
1430 NULL, /* lockfuncarg */
1434 device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1438 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1439 BUS_DMA_NOWAIT, &dma->dma_map);
1441 device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
1445 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1449 mapflags | BUS_DMA_NOWAIT);
1451 device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1455 dma->dma_size = size;
/* Error-unwind path: free memory, destroy tag, NULL the handles. */
1458 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1460 bus_dma_tag_destroy(dma->dma_tag);
1462 dma->dma_map = NULL;
1463 dma->dma_tag = NULL;
/*
 * ixgb_dma_free - release a region obtained from ixgb_dma_malloc():
 * unload the map, free the memory and destroy the tag, in that order.
 */
1470 ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1472 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1473 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1474 bus_dma_tag_destroy(dma->dma_tag);
1477 /*********************************************************************
1479 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1480 * the information needed to transmit a packet on the wire.
1482 **********************************************************************/
/*
 * ixgb_allocate_transmit_structures - malloc one ixgb_buffer per TX
 * descriptor.  Returns an error (path not visible) if the allocation
 * fails.
 */
1484 ixgb_allocate_transmit_structures(struct adapter * adapter)
1486 if (!(adapter->tx_buffer_area =
1487 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1488 adapter->num_tx_desc, M_DEVBUF,
1489 M_NOWAIT | M_ZERO))) {
1490 device_printf(adapter->dev,
1491 "Unable to allocate tx_buffer memory\n");
/* NOTE(review): bzero is redundant — M_ZERO above already zeroed it. */
1494 bzero(adapter->tx_buffer_area,
1495 sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1500 /*********************************************************************
1502 * Allocate and initialize transmit structures.
1504 **********************************************************************/
/*
 * ixgb_setup_transmit_structures - create the TX mbuf DMA tag (up to
 * IXGB_MAX_SCATTER segments of MCLBYTES each), allocate the tx_buffer
 * array, zero the descriptor ring and reset the ring bookkeeping.
 */
1506 ixgb_setup_transmit_structures(struct adapter * adapter)
1509 * Setup DMA descriptor areas.
1511 if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1512 PAGE_SIZE, 0, /* alignment, bounds */
1513 BUS_SPACE_MAXADDR, /* lowaddr */
1514 BUS_SPACE_MAXADDR, /* highaddr */
1515 NULL, NULL, /* filter, filterarg */
1516 MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */
1517 IXGB_MAX_SCATTER, /* nsegments */
1518 MCLBYTES, /* maxsegsize */
1519 BUS_DMA_ALLOCNOW, /* flags */
1520 #if __FreeBSD_version >= 502000
1521 NULL, /* lockfunc */
1522 NULL, /* lockfuncarg */
1525 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1528 if (ixgb_allocate_transmit_structures(adapter))
1531 bzero((void *)adapter->tx_desc_base,
1532 (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
/* Ring starts empty: head/tail indices at 0, all descriptors free. */
1534 adapter->next_avail_tx_desc = 0;
1535 adapter->oldest_used_tx_desc = 0;
1537 /* Set number of descriptors available */
1538 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1540 /* Set checksum context */
1541 adapter->active_checksum_context = OFFLOAD_NONE;
1546 /*********************************************************************
1548 * Enable transmit unit.
1550 **********************************************************************/
/*
 * ixgb_initialize_transmit_unit - program the TX descriptor ring base,
 * length, head/tail pointers and interrupt delay, then enable the
 * transmitter via TCTL and record the per-descriptor command bits.
 */
1552 ixgb_initialize_transmit_unit(struct adapter * adapter)
1555 u_int64_t tdba = adapter->txdma.dma_paddr;
1557 /* Setup the Base and Length of the Tx Descriptor Ring */
1558 IXGB_WRITE_REG(&adapter->hw, TDBAL,
1559 (tdba & 0x00000000ffffffffULL));
1560 IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1561 IXGB_WRITE_REG(&adapter->hw, TDLEN,
1562 adapter->num_tx_desc *
1563 sizeof(struct ixgb_tx_desc));
1565 /* Setup the HW Tx Head and Tail descriptor pointers */
1566 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1567 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1570 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1571 IXGB_READ_REG(&adapter->hw, TDBAL),
1572 IXGB_READ_REG(&adapter->hw, TDLEN));
/* TX interrupt delay value (TIDV), in hardware units. */
1574 IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1577 /* Program the Transmit Control Register */
/* NOTE(review): the TCTL read below is dead — the next line overwrites
 * reg_tctl unconditionally rather than OR-ing into it.  Harmless, but
 * worth confirming that discarding the prior TCTL contents is intended. */
1578 reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1579 reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1580 IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1582 /* Setup Transmit Descriptor Settings for this adapter */
1583 adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
/* Ask for delayed interrupts on TX completion when a delay is set. */
1585 if (adapter->tx_int_delay > 0)
1586 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1590 /*********************************************************************
1592 * Free all transmit related data structures.
1594 **********************************************************************/
/*
 * ixgb_free_transmit_structures - for every TX buffer still holding an
 * mbuf: unload/destroy its DMA map and free the chain; then free the
 * tx_buffer array and destroy the TX DMA tag.  Safe to call when parts
 * were never allocated (each step is NULL-guarded).
 */
1596 ixgb_free_transmit_structures(struct adapter * adapter)
1598 struct ixgb_buffer *tx_buffer;
1601 INIT_DEBUGOUT("free_transmit_structures: begin");
1603 if (adapter->tx_buffer_area != NULL) {
1604 tx_buffer = adapter->tx_buffer_area;
1605 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1606 if (tx_buffer->m_head != NULL) {
1607 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1608 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1609 m_freem(tx_buffer->m_head);
1611 tx_buffer->m_head = NULL;
1614 if (adapter->tx_buffer_area != NULL) {
1615 free(adapter->tx_buffer_area, M_DEVBUF);
1616 adapter->tx_buffer_area = NULL;
1618 if (adapter->txtag != NULL) {
1619 bus_dma_tag_destroy(adapter->txtag);
1620 adapter->txtag = NULL;
1625 /*********************************************************************
1627 * The offload context needs to be set when we transfer the first
1628 * packet of a particular protocol (TCP/UDP). We change the
1629 * context only if the protocol type changes.
1631 **********************************************************************/
/*
 * ixgb_transmit_checksum_setup - decide whether the packet needs a new
 * checksum-offload context descriptor.  Sets *txd_popts to the TXSM
 * popts bit for TCP/UDP csum requests; if the cached context already
 * matches the protocol, returns early (early-return lines not visible)
 * without consuming a descriptor.  Otherwise writes a context
 * descriptor into the ring and advances the ring state.
 */
1633 ixgb_transmit_checksum_setup(struct adapter * adapter,
1635 u_int8_t * txd_popts)
1637 struct ixgb_context_desc *TXD;
1638 struct ixgb_buffer *tx_buffer;
1641 if (mp->m_pkthdr.csum_flags) {
1643 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1644 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1645 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1648 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1649 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1650 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1651 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1654 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1665 * If we reach this point, the checksum offload context needs to be
1668 curr_txd = adapter->next_avail_tx_desc;
1669 tx_buffer = &adapter->tx_buffer_area[curr_txd];
/* Reuse a ring slot as a context descriptor (same size as tx_desc). */
1670 TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
/* Checksum start: immediately after the Ethernet + IPv4 headers. */
1673 TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
/* Checksum offset: position of the TCP/UDP checksum field itself. */
1678 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1680 ENET_HEADER_SIZE + sizeof(struct ip) +
1681 offsetof(struct tcphdr, th_sum);
1682 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1684 ENET_HEADER_SIZE + sizeof(struct ip) +
1685 offsetof(struct udphdr, uh_sum);
1687 TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
/* Context descriptors carry no mbuf; clean path must not free one. */
1689 tx_buffer->m_head = NULL;
1691 if (++curr_txd == adapter->num_tx_desc)
1694 adapter->num_tx_desc_avail--;
1695 adapter->next_avail_tx_desc = curr_txd;
1699 /**********************************************************************
1701 * Examine each tx_buffer in the used queue. If the hardware is done
1702 * processing the packet then free associated resources. The
1703 * tx_buffer is put back on the free queue.
1705 **********************************************************************/
/*
 * ixgb_clean_transmit_interrupts - reclaim TX descriptors whose DD
 * (descriptor-done) status bit the hardware has set, freeing the
 * associated mbufs and DMA maps, then update ring accounting and the
 * OACTIVE flag / watchdog.  Requires the adapter lock.
 */
1707 ixgb_clean_transmit_interrupts(struct adapter * adapter)
1710 struct ixgb_buffer *tx_buffer;
1711 struct ixgb_tx_desc *tx_desc;
1713 IXGB_LOCK_ASSERT(adapter);
/* Nothing to clean when the ring is already completely free. */
1715 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1719 adapter->clean_tx_interrupts++;
1721 num_avail = adapter->num_tx_desc_avail;
1722 i = adapter->oldest_used_tx_desc;
1724 tx_buffer = &adapter->tx_buffer_area[i];
1725 tx_desc = &adapter->tx_desc_base[i];
/* Walk forward while hardware reports each descriptor done. */
1727 while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1729 tx_desc->status = 0;
/* m_head is NULL for context descriptors — nothing to free then. */
1732 if (tx_buffer->m_head) {
1733 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1734 BUS_DMASYNC_POSTWRITE);
1735 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1736 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1737 m_freem(tx_buffer->m_head);
1738 tx_buffer->m_head = NULL;
1740 if (++i == adapter->num_tx_desc)
1743 tx_buffer = &adapter->tx_buffer_area[i];
1744 tx_desc = &adapter->tx_desc_base[i];
1747 adapter->oldest_used_tx_desc = i;
1750 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1751 * it is OK to send packets. If there are no pending descriptors,
1752 * clear the timeout. Otherwise, if some descriptors have been freed,
1753 * restart the timeout.
1755 if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1756 struct ifnet *ifp = adapter->ifp;
1758 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1759 if (num_avail == adapter->num_tx_desc)
1760 adapter->tx_timer = 0;
1761 else if (num_avail == adapter->num_tx_desc_avail)
1762 adapter->tx_timer = IXGB_TX_TIMEOUT;
1764 adapter->num_tx_desc_avail = num_avail;
1769 /*********************************************************************
1771 * Get a buffer from system mbuf buffer pool.
1773 **********************************************************************/
/*
 * ixgb_get_buf - install an mbuf cluster in RX descriptor slot i.
 * If nmp is NULL a fresh cluster is allocated; otherwise the caller's
 * mbuf is recycled (length/data reset).  The cluster is DMA-loaded and
 * its bus address written into the descriptor.  Returns ENOBUFS on
 * allocation failure (return lines not visible in this view).
 */
1775 ixgb_get_buf(int i, struct adapter * adapter,
1778 register struct mbuf *mp = nmp;
1779 struct ixgb_buffer *rx_buffer;
1788 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1791 adapter->mbuf_alloc_failed++;
1794 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* Recycle path: reset length and data pointer of the caller's mbuf. */
1796 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1797 mp->m_data = mp->m_ext.ext_buf;
/* For standard MTU, align the IP header (ETHER_ALIGN = 2-byte shift). */
1801 if (ifp->if_mtu <= ETHERMTU) {
1802 m_adj(mp, ETHER_ALIGN);
1804 rx_buffer = &adapter->rx_buffer_area[i];
1807 * Using memory from the mbuf cluster pool, invoke the bus_dma
1808 * machinery to arrange the memory mapping.
1810 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1811 mtod(mp, void *), mp->m_len,
1812 ixgb_dmamap_cb, &paddr, 0);
1817 rx_buffer->m_head = mp;
1818 adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1819 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1824 /*********************************************************************
1826 * Allocate memory for rx_buffer structures. Since we use one
1827 * rx_buffer per received packet, the maximum number of rx_buffer's
1828 * that we'll need is equal to the number of receive descriptors
1829 * that we've allocated.
1831 **********************************************************************/
/*
 * ixgb_allocate_receive_structures - allocate the rx_buffer array,
 * create the single-segment RX DMA tag (MCLBYTES max), create a map
 * per descriptor, and populate every slot with an mbuf cluster via
 * ixgb_get_buf().  Error paths unwind the tag and buffer array.
 */
1833 ixgb_allocate_receive_structures(struct adapter * adapter)
1836 struct ixgb_buffer *rx_buffer;
1838 if (!(adapter->rx_buffer_area =
1839 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1840 adapter->num_rx_desc, M_DEVBUF,
1841 M_NOWAIT | M_ZERO))) {
1842 device_printf(adapter->dev,
1843 "Unable to allocate rx_buffer memory\n");
/* NOTE(review): bzero is redundant — M_ZERO above already zeroed it. */
1846 bzero(adapter->rx_buffer_area,
1847 sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
1849 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1850 PAGE_SIZE, 0, /* alignment, bounds */
1851 BUS_SPACE_MAXADDR, /* lowaddr */
1852 BUS_SPACE_MAXADDR, /* highaddr */
1853 NULL, NULL, /* filter, filterarg */
1854 MCLBYTES, /* maxsize */
1856 MCLBYTES, /* maxsegsize */
1857 BUS_DMA_ALLOCNOW, /* flags */
1858 #if __FreeBSD_version >= 502000
1859 NULL, /* lockfunc */
1860 NULL, /* lockfuncarg */
1864 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1865 "bus_dma_tag_create failed; error %u\n",
/* One DMA map per RX descriptor slot. */
1869 rx_buffer = adapter->rx_buffer_area;
1870 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1871 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1874 device_printf(adapter->dev,
1875 "ixgb_allocate_receive_structures: "
1876 "bus_dmamap_create failed; error %u\n",
/* Prime every descriptor with an mbuf cluster. */
1882 for (i = 0; i < adapter->num_rx_desc; i++) {
1883 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1884 adapter->rx_buffer_area[i].m_head = NULL;
1885 adapter->rx_desc_base[i].buff_addr = 0;
/* Error-unwind path: destroy tag and free the buffer array. */
1892 bus_dma_tag_destroy(adapter->rxtag);
1894 adapter->rxtag = NULL;
1895 free(adapter->rx_buffer_area, M_DEVBUF);
1896 adapter->rx_buffer_area = NULL;
1900 /*********************************************************************
1902 * Allocate and initialize receive structures.
1904 **********************************************************************/
/*
 * ixgb_setup_receive_structures - zero the RX descriptor ring,
 * allocate/populate the receive buffers and reset the ring indices.
 */
1906 ixgb_setup_receive_structures(struct adapter * adapter)
1908 bzero((void *)adapter->rx_desc_base,
1909 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1911 if (ixgb_allocate_receive_structures(adapter))
1914 /* Setup our descriptor pointers */
1915 adapter->next_rx_desc_to_check = 0;
1916 adapter->next_rx_desc_to_use = 0;
1920 /*********************************************************************
1922 * Enable receive unit.
1924 **********************************************************************/
/*
 * ixgb_initialize_receive_unit - program the RX side of the MAC:
 * descriptor ring base/length/head/tail, interrupt moderation (RDTR
 * and optional RAIDC), descriptor thresholds (RXDCTL), RX checksum
 * offload (RXCSUM) and finally the receive control register (RCTL),
 * including the buffer-size encoding and the RXEN enable bit.
 */
1926 ixgb_initialize_receive_unit(struct adapter * adapter)
1929 u_int32_t reg_rxcsum;
1930 u_int32_t reg_rxdctl;
1932 u_int64_t rdba = adapter->rxdma.dma_paddr;
1937 * Make sure receives are disabled while setting up the descriptor
1940 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1941 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1943 /* Set the Receive Delay Timer Register */
1944 IXGB_WRITE_REG(&adapter->hw, RDTR,
1945 adapter->rx_int_delay);
1948 /* Setup the Base and Length of the Rx Descriptor Ring */
1949 IXGB_WRITE_REG(&adapter->hw, RDBAL,
1950 (rdba & 0x00000000ffffffffULL));
1951 IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1952 IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1953 sizeof(struct ixgb_rx_desc));
1955 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1956 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
/* Tail one behind head: hand all but one descriptor to hardware. */
1958 IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
/* Prefetch/host/write-back thresholds for the RX DMA engine. */
1962 reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1963 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1964 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1965 IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
/* Optional adaptive interrupt moderation via the RAIDC register. */
1969 if (adapter->raidc) {
1971 uint8_t poll_threshold;
1972 #define IXGB_RAIDC_POLL_DEFAULT 120
/* Poll threshold: 1/16 of the ring, clamped to the 6-bit field. */
1974 poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1975 poll_threshold >>= 1;
1976 poll_threshold &= 0x3F;
1977 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1978 (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1979 (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1981 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1983 /* Enable Receive Checksum Offload for TCP and UDP ? */
1984 if (ifp->if_capenable & IFCAP_RXCSUM) {
1985 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1986 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1987 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1989 /* Setup the Receive Control Register */
1990 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1991 reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1992 reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1994 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
/* Encode the configured RX buffer size into RCTL.BSIZE. */
1996 switch (adapter->rx_buffer_len) {
1998 case IXGB_RXBUFFER_2048:
1999 reg_rctl |= IXGB_RCTL_BSIZE_2048;
2001 case IXGB_RXBUFFER_4096:
2002 reg_rctl |= IXGB_RCTL_BSIZE_4096;
2004 case IXGB_RXBUFFER_8192:
2005 reg_rctl |= IXGB_RCTL_BSIZE_8192;
2007 case IXGB_RXBUFFER_16384:
2008 reg_rctl |= IXGB_RCTL_BSIZE_16384;
2012 reg_rctl |= IXGB_RCTL_RXEN;
2015 /* Enable Receives */
2016 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2021 /*********************************************************************
2023 * Free receive related data structures.
2025 **********************************************************************/
/*
 * ixgb_free_receive_structures - mirror of the allocate path: unload
 * and destroy each RX DMA map, free any attached mbufs, then free the
 * rx_buffer array and destroy the RX tag.  NULL-guarded throughout so
 * it is safe after a partial allocation failure.
 */
2027 ixgb_free_receive_structures(struct adapter * adapter)
2029 struct ixgb_buffer *rx_buffer;
2032 INIT_DEBUGOUT("free_receive_structures: begin");
2034 if (adapter->rx_buffer_area != NULL) {
2035 rx_buffer = adapter->rx_buffer_area;
2036 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2037 if (rx_buffer->map != NULL) {
2038 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2039 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2041 if (rx_buffer->m_head != NULL)
2042 m_freem(rx_buffer->m_head);
2043 rx_buffer->m_head = NULL;
2046 if (adapter->rx_buffer_area != NULL) {
2047 free(adapter->rx_buffer_area, M_DEVBUF);
2048 adapter->rx_buffer_area = NULL;
2050 if (adapter->rxtag != NULL) {
2051 bus_dma_tag_destroy(adapter->rxtag);
2052 adapter->rxtag = NULL;
2057 /*********************************************************************
2059 * This routine executes in interrupt context. It replenishes
2060 * the mbufs in the descriptor and sends data which has been
2061 * dma'ed into host memory to upper layer.
2063 * We loop at most count times if count is > 0, or until done if
2066 *********************************************************************/
/*
 * ixgb_process_receive_interrupts - RX completion loop.  Walks the
 * ring from next_rx_desc_to_check while the DD bit is set, assembles
 * multi-descriptor frames into an fmp/lmp mbuf chain, hands completed
 * frames to the stack (dropping the lock around if_input), then
 * replenishes descriptors and advances the hardware tail pointer —
 * deliberately stopping one short to work around an 82597EX
 * descriptor write-back erratum (see comment below).
 */
2068 ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2072 #if __FreeBSD_version < 500000
2073 struct ether_header *eh;
2077 u_int8_t accept_frame = 0;
2079 int next_to_use = 0;
2082 /* Pointer to the receive descriptor being examined. */
2083 struct ixgb_rx_desc *current_desc;
2085 IXGB_LOCK_ASSERT(adapter);
2088 i = adapter->next_rx_desc_to_check;
2089 next_to_use = adapter->next_rx_desc_to_use;
2090 eop_desc = adapter->next_rx_desc_to_check;
2091 current_desc = &adapter->rx_desc_base[i];
/* Fast exit when no descriptor has been written back yet. */
2093 if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2095 adapter->no_pkts_avail++;
2099 while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2101 mp = adapter->rx_buffer_area[i].m_head;
2102 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2103 BUS_DMASYNC_POSTREAD);
2105 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2111 len = current_desc->length;
/* Reject frames flagged with CRC/symbol/parity/RX errors. */
2113 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2114 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2115 IXGB_RX_DESC_ERRORS_RXE)) {
2120 /* Assign correct length to the current fragment */
2123 if (adapter->fmp == NULL) {
2124 mp->m_pkthdr.len = len;
2125 adapter->fmp = mp; /* Store the first mbuf */
2128 /* Chain mbuf's together */
2129 mp->m_flags &= ~M_PKTHDR;
2130 adapter->lmp->m_next = mp;
2131 adapter->lmp = adapter->lmp->m_next;
2132 adapter->fmp->m_pkthdr.len += len;
2137 adapter->fmp->m_pkthdr.rcvif = ifp;
2139 #if __FreeBSD_version < 500000
/* Pre-5.0 path: strip the Ethernet header, tag VLANs, ether_input. */
2140 eh = mtod(adapter->fmp, struct ether_header *);
2142 /* Remove ethernet header from mbuf */
2143 m_adj(adapter->fmp, sizeof(struct ether_header));
2144 ixgb_receive_checksum(adapter, current_desc,
2147 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2148 VLAN_INPUT_TAG(eh, adapter->fmp,
2149 current_desc->special);
2151 ether_input(ifp, eh, adapter->fmp);
2153 ixgb_receive_checksum(adapter, current_desc,
2155 #if __FreeBSD_version < 700000
2156 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2157 VLAN_INPUT_TAG(ifp, adapter->fmp,
2158 current_desc->special);
/* 7.0+ path: record the VLAN tag in the mbuf packet header. */
2160 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2161 adapter->fmp->m_pkthdr.ether_vtag =
2162 current_desc->special;
2163 adapter->fmp->m_flags |= M_VLANTAG;
/* Drop the adapter lock while the stack consumes the frame. */
2167 if (adapter->fmp != NULL) {
2168 IXGB_UNLOCK(adapter);
2169 (*ifp->if_input) (ifp, adapter->fmp);
2174 adapter->fmp = NULL;
2175 adapter->lmp = NULL;
2177 adapter->rx_buffer_area[i].m_head = NULL;
/* Error path for a bad frame: free any partial chain. */
2179 adapter->dropped_pkts++;
2180 if (adapter->fmp != NULL)
2181 m_freem(adapter->fmp);
2182 adapter->fmp = NULL;
2183 adapter->lmp = NULL;
2186 /* Zero out the receive descriptors status */
2187 current_desc->status = 0;
2189 /* Advance our pointers to the next descriptor */
2190 if (++i == adapter->num_rx_desc) {
2192 current_desc = adapter->rx_desc_base;
2196 adapter->next_rx_desc_to_check = i;
/* NOTE(review): 'i' rewound here — wrap-around handling for the
 * erratum bookkeeping below; surrounding lines not visible, confirm. */
2199 i = (adapter->num_rx_desc - 1);
2202 * 82597EX: Workaround for redundent write back in receive descriptor ring (causes
2203 * memory corruption). Avoid using and re-submitting the most recently received RX
2204 * descriptor back to hardware.
2206 * if(Last written back descriptor == EOP bit set descriptor)
2207 * then avoid re-submitting the most recently received RX descriptor
2209 * if(Last written back descriptor != EOP bit set descriptor)
2210 * then avoid re-submitting the most recently received RX descriptors
2211 * till last EOP bit set descriptor.
2213 if (eop_desc != i) {
2214 if (++eop_desc == adapter->num_rx_desc)
2218 /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
2219 while (next_to_use != i) {
2220 current_desc = &adapter->rx_desc_base[next_to_use];
2221 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2222 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2223 IXGB_RX_DESC_ERRORS_RXE))) {
/* Errored slot: recycle the existing mbuf instead of allocating. */
2224 mp = adapter->rx_buffer_area[next_to_use].m_head;
2225 ixgb_get_buf(next_to_use, adapter, mp);
2227 if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2230 /* Advance our pointers to the next descriptor */
2231 if (++next_to_use == adapter->num_rx_desc) {
2233 current_desc = adapter->rx_desc_base;
2237 adapter->next_rx_desc_to_use = next_to_use;
/* Write the tail one behind the last replenished slot (erratum). */
2238 if (--next_to_use < 0)
2239 next_to_use = (adapter->num_rx_desc - 1);
2240 /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
2241 IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2246 /*********************************************************************
2248 * Verify that the hardware indicated that the checksum is valid.
2249 * Inform the stack about the status of checksum so that stack
2250 * doesn't spend time verifying the checksum.
2252 *********************************************************************/
/*
 * ixgb_receive_checksum - translate the RX descriptor's checksum
 * status/error bits into mbuf csum_flags: IXSM means the hardware
 * ignored the checksum entirely; otherwise report IP and TCP/UDP
 * results separately.
 */
2254 ixgb_receive_checksum(struct adapter * adapter,
2255 struct ixgb_rx_desc * rx_desc,
/* Hardware ignored the checksum: tell the stack nothing was checked. */
2258 if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2259 mp->m_pkthdr.csum_flags = 0;
2262 if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2264 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2265 /* IP Checksum Good */
2266 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2267 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* IP checksum was checked but failed: clear the flags. */
2270 mp->m_pkthdr.csum_flags = 0;
2273 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2275 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
2276 mp->m_pkthdr.csum_flags |=
2277 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/* Valid pseudo-header checksum sentinel expected by the stack. */
2278 mp->m_pkthdr.csum_data = htons(0xffff);
/*
 * ixgb_enable_vlans - set the VLAN Mode Enable bit in CTRL0 so the
 * hardware strips/inserts 802.1Q tags.
 */
2286 ixgb_enable_vlans(struct adapter * adapter)
2290 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2291 ctrl |= IXGB_CTRL0_VME;
2292 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
/*
 * ixgb_enable_intr - unmask the interrupt causes this driver handles:
 * RX timer, TX writeback, RX descriptor minimum, link change, RX
 * overrun.
 */
2299 ixgb_enable_intr(struct adapter * adapter)
2301 IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2302 IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
/* ixgb_disable_intr - mask every interrupt cause (IMC = all ones). */
2307 ixgb_disable_intr(struct adapter * adapter)
2309 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
/*
 * ixgb_write_pci_cfg - shared-code hook: write a PCI config register
 * through the device handle stashed in the osdep back-pointer.
 */
2314 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2318 pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2322 /**********************************************************************
2324 * Update the board statistics counters.
2326 **********************************************************************/
/*
 * ixgb_update_stats_counters - accumulate every hardware statistics
 * register into the driver's software counters (the registers are
 * clear-on-read, so each read is a delta), then mirror the totals
 * into the ifnet statistics fields the stack reports.
 */
2328 ixgb_update_stats_counters(struct adapter * adapter)
2332 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2333 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2334 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2335 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2336 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2337 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2338 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2339 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2340 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2341 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2343 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2344 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2345 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2346 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2347 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2348 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2349 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2350 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2351 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2352 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2353 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2354 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2355 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2356 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2357 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2358 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2359 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2360 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2361 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2362 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2363 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2364 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2365 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2366 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2367 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2368 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2369 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2371 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2372 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2373 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2374 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2375 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2376 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2377 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2378 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2379 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2380 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2381 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2382 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2383 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2384 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2385 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2386 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2387 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2388 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2389 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2390 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2391 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2392 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2396 /* Fill out the OS statistics structure */
2397 ifp->if_ipackets = adapter->stats.gprcl;
2398 ifp->if_opackets = adapter->stats.gptcl;
2399 ifp->if_ibytes = adapter->stats.gorcl;
2400 ifp->if_obytes = adapter->stats.gotcl;
2401 ifp->if_imcasts = adapter->stats.mprcl;
/* Full-duplex fiber link: collisions cannot occur. */
2402 ifp->if_collisions = 0;
/* Input errors: software drops plus hardware-counted error classes. */
2406 adapter->dropped_pkts +
2407 adapter->stats.crcerrs +
2408 adapter->stats.rnbc +
2409 adapter->stats.mpc +
2410 adapter->stats.rlec;
2416 /**********************************************************************
2418 * This routine is called only when ixgb_display_debug_stats is enabled.
2419 * This routine provides a way to take a look at important statistics
2420 * maintained by the driver and hardware.
2422 **********************************************************************/
2424 ixgb_print_hw_stats(struct adapter * adapter)
/*
 * Scratch buffers that receive the human-readable bus speed/type
 * strings assembled from the ternary chains below.
 */
2426 char buf_speed[100], buf_type[100];
2427 ixgb_bus_speed bus_speed;
2428 ixgb_bus_type bus_type;
/*
 * Driver-maintained software counters (u_long, hence %ld).
 * NOTE(review): "dev" used below is presumably a device_t taken from
 * the adapter earlier in this function — the declaration is not
 * visible in this excerpt; confirm against the full source.
 */
2433 device_printf(dev, "Packets not Avail = %ld\n",
2434 adapter->no_pkts_avail);
2435 device_printf(dev, "CleanTxInterrupts = %ld\n",
2436 adapter->clean_tx_interrupts);
/*
 * Per-cause interrupt counts saved by the interrupt handler
 * (sv_stats): RX descriptor-minimum-threshold, RX overrun,
 * RX timer, and TX descriptor-written-back events.  These are
 * 64-bit, so they are cast to long long to match %lld.
 */
2437 device_printf(dev, "ICR RXDMT0 = %lld\n",
2438 (long long)adapter->sv_stats.icr_rxdmt0);
2439 device_printf(dev, "ICR RXO = %lld\n",
2440 (long long)adapter->sv_stats.icr_rxo);
2441 device_printf(dev, "ICR RXT0 = %lld\n",
2442 (long long)adapter->sv_stats.icr_rxt0);
2443 device_printf(dev, "ICR TXDW = %lld\n",
2444 (long long)adapter->sv_stats.icr_TXDW);
/*
 * Decode the PCI/PCI-X bus speed and type reported by the hardware
 * into printable strings.  (The final "else" arm of each ternary
 * chain — the unknown-value fallback — is not visible in this
 * excerpt.)
 */
2447 bus_speed = adapter->hw.bus.speed;
2448 bus_type = adapter->hw.bus.type;
2450 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2451 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2452 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2453 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2455 device_printf(dev, "PCI_Bus_Speed = %s\n",
2459 bus_type == ixgb_bus_type_pci ? "PCI" :
2460 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2462 device_printf(dev, "PCI_Bus_Type = %s\n",
/*
 * Driver resource-exhaustion counters: two distinct TX-descriptor
 * shortage paths, plus mbuf and mbuf-cluster allocation failures.
 */
2465 device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2466 adapter->no_tx_desc_avail1);
2467 device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2468 adapter->no_tx_desc_avail2);
2469 device_printf(dev, "Std Mbuf Failed = %ld\n",
2470 adapter->mbuf_alloc_failed);
2471 device_printf(dev, "Std Cluster Failed = %ld\n",
2472 adapter->mbuf_cluster_failed);
/*
 * Hardware MAC statistics, accumulated into adapter->stats by the
 * stats-update routine: defer count, missed packets, receive
 * no-buffer, receive length errors, and CRC errors, followed by the
 * driver's own dropped-packet count.
 */
2474 device_printf(dev, "Defer count = %lld\n",
2475 (long long)adapter->stats.dc);
2476 device_printf(dev, "Missed Packets = %lld\n",
2477 (long long)adapter->stats.mpc);
2478 device_printf(dev, "Receive No Buffers = %lld\n",
2479 (long long)adapter->stats.rnbc);
2480 device_printf(dev, "Receive length errors = %lld\n",
2481 (long long)adapter->stats.rlec);
2482 device_printf(dev, "Crc errors = %lld\n",
2483 (long long)adapter->stats.crcerrs);
2484 device_printf(dev, "Driver dropped packets = %ld\n",
2485 adapter->dropped_pkts);
/*
 * 802.3x flow-control pause frames: XON/XOFF received and
 * transmitted.
 */
2487 device_printf(dev, "XON Rcvd = %lld\n",
2488 (long long)adapter->stats.xonrxc);
2489 device_printf(dev, "XON Xmtd = %lld\n",
2490 (long long)adapter->stats.xontxc);
2491 device_printf(dev, "XOFF Rcvd = %lld\n",
2492 (long long)adapter->stats.xoffrxc);
2493 device_printf(dev, "XOFF Xmtd = %lld\n",
2494 (long long)adapter->stats.xofftxc);
/*
 * Good (error-free) packet counts in each direction, and
 * jumbo-frame counts received/transmitted.
 */
2496 device_printf(dev, "Good Packets Rcvd = %lld\n",
2497 (long long)adapter->stats.gprcl);
2498 device_printf(dev, "Good Packets Xmtd = %lld\n",
2499 (long long)adapter->stats.gptcl);
2501 device_printf(dev, "Jumbo frames recvd = %lld\n",
2502 (long long)adapter->stats.jprcl);
2503 device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2504 (long long)adapter->stats.jptcl);
2511 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2515 struct adapter *adapter;
2518 error = sysctl_handle_int(oidp, &result, 0, req);
2520 if (error || !req->newptr)
2524 adapter = (struct adapter *) arg1;
2525 ixgb_print_hw_stats(adapter);