1 /*******************************************************************************
3 Copyright (c) 2001-2004, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
40 #include <dev/ixgb/if_ixgb.h>
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
45 int ixgb_display_debug_stats = 0;
47 /*********************************************************************
48 * Linked list of board private structures for all NICs found
49 *********************************************************************/
51 struct adapter *ixgb_adapter_list = NULL;
55 /*********************************************************************
57 *********************************************************************/
59 char ixgb_driver_version[] = "1.0.6";
60 char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation.";
62 /*********************************************************************
65 * Used by probe to select devices to load on
66 * Last field stores an index into ixgb_strings
67 * Last entry must be all 0s
69 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
70 *********************************************************************/
72 static ixgb_vendor_info_t ixgb_vendor_info_array[] =
74 /* Intel(R) PRO/10000 Network Connection */
75 {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0},
76 {IXGB_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0},
77 /* required last entry */
81 /*********************************************************************
82 * Table of branding strings for all supported NICs.
83 *********************************************************************/
85 static char *ixgb_strings[] = {
86 "Intel(R) PRO/10GbE Network Driver"
89 /*********************************************************************
91 *********************************************************************/
92 static int ixgb_probe(device_t);
93 static int ixgb_attach(device_t);
94 static int ixgb_detach(device_t);
95 static int ixgb_shutdown(device_t);
96 static void ixgb_intr(void *);
97 static void ixgb_start(struct ifnet *);
98 static void ixgb_start_locked(struct ifnet *);
99 static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
100 static void ixgb_watchdog(struct adapter *);
101 static void ixgb_init(void *);
102 static void ixgb_init_locked(struct adapter *);
103 static void ixgb_stop(void *);
104 static void ixgb_media_status(struct ifnet *, struct ifmediareq *);
105 static int ixgb_media_change(struct ifnet *);
106 static void ixgb_identify_hardware(struct adapter *);
107 static int ixgb_allocate_pci_resources(struct adapter *);
108 static void ixgb_free_pci_resources(struct adapter *);
109 static void ixgb_local_timer(void *);
110 static int ixgb_hardware_init(struct adapter *);
111 static int ixgb_setup_interface(device_t, struct adapter *);
112 static int ixgb_setup_transmit_structures(struct adapter *);
113 static void ixgb_initialize_transmit_unit(struct adapter *);
114 static int ixgb_setup_receive_structures(struct adapter *);
115 static void ixgb_initialize_receive_unit(struct adapter *);
116 static void ixgb_enable_intr(struct adapter *);
117 static void ixgb_disable_intr(struct adapter *);
118 static void ixgb_free_transmit_structures(struct adapter *);
119 static void ixgb_free_receive_structures(struct adapter *);
120 static void ixgb_update_stats_counters(struct adapter *);
121 static void ixgb_clean_transmit_interrupts(struct adapter *);
122 static int ixgb_allocate_receive_structures(struct adapter *);
123 static int ixgb_allocate_transmit_structures(struct adapter *);
124 static int ixgb_process_receive_interrupts(struct adapter *, int);
126 ixgb_receive_checksum(struct adapter *,
127 struct ixgb_rx_desc * rx_desc,
130 ixgb_transmit_checksum_setup(struct adapter *,
133 static void ixgb_set_promisc(struct adapter *);
134 static void ixgb_disable_promisc(struct adapter *);
135 static void ixgb_set_multi(struct adapter *);
136 static void ixgb_print_hw_stats(struct adapter *);
137 static void ixgb_print_link_status(struct adapter *);
139 ixgb_get_buf(int i, struct adapter *,
141 static void ixgb_enable_vlans(struct adapter * adapter);
142 static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head);
143 static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS);
145 ixgb_dma_malloc(struct adapter *, bus_size_t,
146 struct ixgb_dma_alloc *, int);
147 static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *);
148 #ifdef DEVICE_POLLING
149 static poll_handler_t ixgb_poll;
152 /*********************************************************************
153 * FreeBSD Device Interface Entry Points
154 *********************************************************************/
156 static device_method_t ixgb_methods[] = {
157 /* Device interface */
158 DEVMETHOD(device_probe, ixgb_probe),
159 DEVMETHOD(device_attach, ixgb_attach),
160 DEVMETHOD(device_detach, ixgb_detach),
161 DEVMETHOD(device_shutdown, ixgb_shutdown),
166 static driver_t ixgb_driver = {
167 "ixgb", ixgb_methods, sizeof(struct adapter),
170 static devclass_t ixgb_devclass;
171 DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0);
173 MODULE_DEPEND(ixgb, pci, 1, 1, 1);
174 MODULE_DEPEND(ixgb, ether, 1, 1, 1);
176 /* some defines for controlling descriptor fetches in h/w */
177 #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
178 #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
179 * pushed this many descriptors from
181 #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */
184 /*********************************************************************
185 * Device identification routine
187 * ixgb_probe determines if the driver should be loaded on
188 * adapter based on PCI vendor/device id of the adapter.
190 * return 0 on success, positive on failure
191 *********************************************************************/
/*
 * Probe callback: match the device's PCI vendor/device (and, where not
 * PCI_ANY_ID, subvendor/subdevice) IDs against ixgb_vendor_info_array;
 * on a match, set the bus description string and accept the device.
 * NOTE(review): extraction dropped interior lines here (return type,
 * early-return body, closing braces) -- comments cover visible code only.
 */
194 ixgb_probe(device_t dev)
196 ixgb_vendor_info_t *ent;
198 u_int16_t pci_vendor_id = 0;
199 u_int16_t pci_device_id = 0;
200 u_int16_t pci_subvendor_id = 0;
201 u_int16_t pci_subdevice_id = 0;
202 char adapter_name[60];
204 INIT_DEBUGOUT("ixgb_probe: begin");
/* Cheap reject before walking the table: wrong vendor cannot match. */
206 pci_vendor_id = pci_get_vendor(dev);
207 if (pci_vendor_id != IXGB_VENDOR_ID)
210 pci_device_id = pci_get_device(dev);
211 pci_subvendor_id = pci_get_subvendor(dev);
212 pci_subdevice_id = pci_get_subdevice(dev);
/* Table is terminated by an all-zero entry (vendor_id == 0). */
214 ent = ixgb_vendor_info_array;
215 while (ent->vendor_id != 0) {
216 if ((pci_vendor_id == ent->vendor_id) &&
217 (pci_device_id == ent->device_id) &&
219 ((pci_subvendor_id == ent->subvendor_id) ||
220 (ent->subvendor_id == PCI_ANY_ID)) &&
222 ((pci_subdevice_id == ent->subdevice_id) ||
223 (ent->subdevice_id == PCI_ANY_ID))) {
224 sprintf(adapter_name, "%s, Version - %s",
225 ixgb_strings[ent->index],
226 ixgb_driver_version);
227 device_set_desc_copy(dev, adapter_name);
228 return (BUS_PROBE_DEFAULT);
236 /*********************************************************************
237 * Device initialization routine
239 * The attach entry point is called when the driver is being loaded.
240 * This routine identifies the type of hardware, allocates all resources
241 * and initializes the hardware.
243 * return 0 on success, positive on failure
244 *********************************************************************/
/*
 * Attach callback: identify the hardware, allocate PCI/DMA resources,
 * descriptor rings and the multicast table, initialize the chip and
 * register the network interface.
 * NOTE(review): extraction dropped interior lines (error-exit labels,
 * returns, closing braces); the trailing calls below are presumably the
 * unwind path for attach failures -- confirm against the full file.
 */
247 ixgb_attach(device_t dev)
249 struct adapter *adapter;
253 device_printf(dev, "%s\n", ixgb_copyright);
254 INIT_DEBUGOUT("ixgb_attach: begin");
256 /* Allocate, clear, and link in our adapter structure */
257 if (!(adapter = device_get_softc(dev))) {
258 device_printf(dev, "adapter structure allocation failed\n");
261 bzero(adapter, sizeof(struct adapter));
263 adapter->osdep.dev = dev;
264 IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Push onto the head of the global doubly-linked adapter list. */
266 if (ixgb_adapter_list != NULL)
267 ixgb_adapter_list->prev = adapter;
268 adapter->next = ixgb_adapter_list;
269 ixgb_adapter_list = adapter;
/* Expose a per-device "stats" sysctl backed by ixgb_sysctl_stats. */
272 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
273 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
274 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
276 ixgb_sysctl_stats, "I", "Statistics");
278 callout_init_mtx(&adapter->timer, &adapter->mtx, 0);
280 /* Determine hardware revision */
281 ixgb_identify_hardware(adapter);
283 /* Parameters (to be read from user) */
284 adapter->num_tx_desc = IXGB_MAX_TXD;
285 adapter->num_rx_desc = IXGB_MAX_RXD;
286 adapter->tx_int_delay = TIDV;
287 adapter->rx_int_delay = RDTR;
288 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
/* Flow-control watermarks and pause parameters for the MAC. */
290 adapter->hw.fc.high_water = FCRTH;
291 adapter->hw.fc.low_water = FCRTL;
292 adapter->hw.fc.pause_time = FCPAUSE;
293 adapter->hw.fc.send_xon = TRUE;
294 adapter->hw.fc.type = FLOW_CONTROL;
297 /* Set the max frame size assuming standard ethernet sized frames */
298 adapter->hw.max_frame_size =
299 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
301 if (ixgb_allocate_pci_resources(adapter)) {
302 device_printf(dev, "Allocation of PCI resources failed\n");
/* Ring sizes rounded up to a 4 KiB multiple for the DMA allocator. */
306 tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
307 sizeof(struct ixgb_tx_desc), 4096);
309 /* Allocate Transmit Descriptor ring */
310 if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
311 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
315 adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;
317 rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
318 sizeof(struct ixgb_rx_desc), 4096);
320 /* Allocate Receive Descriptor ring */
321 if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
322 device_printf(dev, "Unable to allocate rx_desc memory\n");
326 adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;
328 /* Allocate multicast array memory. */
329 adapter->mta = malloc(sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
330 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
331 if (adapter->mta == NULL) {
332 device_printf(dev, "Can not allocate multicast setup array\n");
337 /* Initialize the hardware */
338 if (ixgb_hardware_init(adapter)) {
339 device_printf(dev, "Unable to initialize the hardware\n");
343 /* Setup OS specific network interface */
344 if (ixgb_setup_interface(dev, adapter) != 0)
347 /* Initialize statistics */
348 ixgb_clear_hw_cntrs(&adapter->hw);
349 ixgb_update_stats_counters(adapter);
351 INIT_DEBUGOUT("ixgb_attach: end");
/* --- error-unwind path (goto labels elided by extraction) --- */
355 ixgb_dma_free(adapter, &adapter->rxdma);
357 ixgb_dma_free(adapter, &adapter->txdma);
360 if (adapter->ifp != NULL)
361 if_free(adapter->ifp);
362 ixgb_free_pci_resources(adapter);
363 sysctl_ctx_free(&adapter->sysctl_ctx);
364 free(adapter->mta, M_DEVBUF);
369 /*********************************************************************
370 * Device removal routine
372 * The detach entry point is called when the driver is being removed.
373 * This routine stops the adapter and deallocates all the resources
374 * that were allocated for driver operation.
376 * return 0 on success, positive on failure
377 *********************************************************************/
/*
 * Detach callback: stop the adapter, detach the ifnet, release PCI and
 * DMA resources, unlink from the global adapter list and destroy the lock.
 * NOTE(review): extraction dropped interior lines (lock acquisition,
 * #else/#endif pairs, closing braces); comments cover visible code only.
 */
380 ixgb_detach(device_t dev)
382 struct adapter *adapter = device_get_softc(dev);
383 struct ifnet *ifp = adapter->ifp;
385 INIT_DEBUGOUT("ixgb_detach: begin");
387 #ifdef DEVICE_POLLING
388 if (ifp->if_capenable & IFCAP_POLLING)
389 ether_poll_deregister(ifp);
/* Flag checked by ixgb_ioctl so it bails out during teardown. */
393 adapter->in_detach = 1;
396 IXGB_UNLOCK(adapter);
398 #if __FreeBSD_version < 500000
399 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
/* callout_drain() waits for a running ixgb_local_timer to finish. */
403 callout_drain(&adapter->timer);
404 ixgb_free_pci_resources(adapter);
405 #if __FreeBSD_version >= 500000
409 /* Free Transmit Descriptor ring */
410 if (adapter->tx_desc_base) {
411 ixgb_dma_free(adapter, &adapter->txdma);
412 adapter->tx_desc_base = NULL;
414 /* Free Receive Descriptor ring */
415 if (adapter->rx_desc_base) {
416 ixgb_dma_free(adapter, &adapter->rxdma);
417 adapter->rx_desc_base = NULL;
419 /* Remove from the adapter list */
420 if (ixgb_adapter_list == adapter)
421 ixgb_adapter_list = adapter->next;
422 if (adapter->next != NULL)
423 adapter->next->prev = adapter->prev;
424 if (adapter->prev != NULL)
425 adapter->prev->next = adapter->next;
426 free(adapter->mta, M_DEVBUF);
428 IXGB_LOCK_DESTROY(adapter);
432 /*********************************************************************
434 * Shutdown entry point
436 **********************************************************************/
/*
 * Shutdown callback: quiesce the adapter before system shutdown.
 * NOTE(review): interior lines elided (presumably IXGB_LOCK + ixgb_stop
 * precede the unlock) -- confirm against the full file.
 */
439 ixgb_shutdown(device_t dev)
441 struct adapter *adapter = device_get_softc(dev);
444 IXGB_UNLOCK(adapter);
449 /*********************************************************************
450 * Transmit entry point
452 * ixgb_start is called by the stack to initiate a transmit.
453 * The driver will remain in this routine as long as there are
454 * packets to transmit and transmit resources are available.
455 * In case resources are not available stack is notified and
456 * the packet is requeued.
457 **********************************************************************/
/*
 * Locked transmit path: drain the interface send queue, encapsulating
 * each mbuf chain into TX descriptors.  If ixgb_encap() fails the
 * packet is requeued and OACTIVE is set so the stack stops feeding us.
 * Caller must hold the adapter lock (asserted below).
 * NOTE(review): interior lines elided (return on !link_active, braces,
 * #else/#endif) -- comments cover visible code only.
 */
460 ixgb_start_locked(struct ifnet * ifp)
463 struct adapter *adapter = ifp->if_softc;
465 IXGB_LOCK_ASSERT(adapter);
467 if (!adapter->link_active)
470 while (ifp->if_snd.ifq_head != NULL) {
471 IF_DEQUEUE(&ifp->if_snd, m_head);
/* No descriptors available: push the mbuf back and stall the queue. */
476 if (ixgb_encap(adapter, m_head)) {
477 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
478 IF_PREPEND(&ifp->if_snd, m_head);
481 /* Send a copy of the frame to the BPF listener */
482 #if __FreeBSD_version < 500000
484 bpf_mtap(ifp, m_head);
486 ETHER_BPF_MTAP(ifp, m_head);
488 /* Set timeout in case hardware has problems transmitting */
489 adapter->tx_timer = IXGB_TX_TIMEOUT;
/*
 * Unlocked if_start entry point: takes the adapter lock (acquisition
 * line elided by extraction) and delegates to ixgb_start_locked().
 */
496 ixgb_start(struct ifnet *ifp)
498 struct adapter *adapter = ifp->if_softc;
501 ixgb_start_locked(ifp);
502 IXGB_UNLOCK(adapter);
506 /*********************************************************************
509 * ixgb_ioctl is called when the user wants to configure the
512 * return 0 on success, positive on failure
513 **********************************************************************/
/*
 * Interface ioctl handler: address, MTU, flags, multicast, media and
 * capability requests.
 * NOTE(review): the switch statement and its case labels (SIOCSIFADDR,
 * SIOCSIFMTU, SIOCSIFFLAGS, SIOCADDMULTI/SIOCDELMULTI, SIOCSIFMEDIA,
 * SIOCSIFCAP, default) were elided by extraction; the IOCTL_DEBUGOUT
 * lines below mark where each case begins.
 */
516 ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
519 struct ifreq *ifr = (struct ifreq *) data;
520 struct adapter *adapter = ifp->if_softc;
/* Refuse ioctls once detach has started (set in ixgb_detach). */
522 if (adapter->in_detach)
528 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
529 ether_ioctl(ifp, command, data);
532 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
/* Reject MTUs whose resulting frame exceeds the jumbo limit. */
533 if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
537 ifp->if_mtu = ifr->ifr_mtu;
538 adapter->hw.max_frame_size =
539 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
541 ixgb_init_locked(adapter);
542 IXGB_UNLOCK(adapter);
546 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
548 if (ifp->if_flags & IFF_UP) {
549 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
550 ixgb_init_locked(adapter);
/* Re-derive promiscuous state from the (possibly changed) flags. */
552 ixgb_disable_promisc(adapter);
553 ixgb_set_promisc(adapter);
555 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
559 IXGB_UNLOCK(adapter);
563 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
564 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/* Update the multicast filter with interrupts masked. */
566 ixgb_disable_intr(adapter);
567 ixgb_set_multi(adapter);
568 ixgb_enable_intr(adapter);
569 IXGB_UNLOCK(adapter);
574 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
575 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
578 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
579 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
580 #ifdef DEVICE_POLLING
581 if (mask & IFCAP_POLLING) {
582 if (ifr->ifr_reqcap & IFCAP_POLLING) {
583 error = ether_poll_register(ixgb_poll, ifp);
587 ixgb_disable_intr(adapter);
588 ifp->if_capenable |= IFCAP_POLLING;
589 IXGB_UNLOCK(adapter);
591 error = ether_poll_deregister(ifp);
592 /* Enable interrupt even in error case */
594 ixgb_enable_intr(adapter);
595 ifp->if_capenable &= ~IFCAP_POLLING;
596 IXGB_UNLOCK(adapter);
599 #endif /* DEVICE_POLLING */
/* Toggle hardware checksum offload capability. */
600 if (mask & IFCAP_HWCSUM) {
601 if (IFCAP_HWCSUM & ifp->if_capenable)
602 ifp->if_capenable &= ~IFCAP_HWCSUM;
604 ifp->if_capenable |= IFCAP_HWCSUM;
605 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
610 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
618 /*********************************************************************
619 * Watchdog entry point
621 * This routine is called whenever hardware quits transmitting.
623 **********************************************************************/
/*
 * Watchdog: invoked from ixgb_local_timer when tx_timer expires,
 * i.e. the hardware stopped transmitting.  If TX is merely paused by
 * flow control (STATUS.TXOFF), just re-arm the timer; otherwise log
 * the timeout and reinitialize the adapter.
 * NOTE(review): interior lines (ifp declaration, returns, braces)
 * elided by extraction.
 */
626 ixgb_watchdog(struct adapter *adapter)
633 * If we are in this routine because of pause frames, then don't
634 * reset the hardware.
636 if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
637 adapter->tx_timer = IXGB_TX_TIMEOUT;
640 if_printf(ifp, "watchdog timeout -- resetting\n");
643 ixgb_init_locked(adapter);
651 /*********************************************************************
654 * This routine is used in two ways. It is used by the stack as
655 * init entry point in network interface structure. It is also used
656 * by the driver as a hw/sw initialization routine to get to a
659 * return 0 on success, positive on failure
660 **********************************************************************/
/*
 * Bring the interface fully up: program the MAC address, reset and
 * reinit the hardware, set up TX/RX rings, multicast and promiscuous
 * state, enable jumbo frames when the MTU requires it, start the
 * watchdog callout, and enable interrupts (unless polling is active).
 * Caller must hold the adapter lock (asserted below).
 * NOTE(review): interior lines (ifp/temp_reg declarations, returns,
 * braces, #else branches) were elided by extraction.
 */
663 ixgb_init_locked(struct adapter *adapter)
667 INIT_DEBUGOUT("ixgb_init: begin");
669 IXGB_LOCK_ASSERT(adapter);
674 /* Get the latest mac address, User can use a LAA */
675 bcopy(IF_LLADDR(ifp), adapter->hw.curr_mac_addr,
676 IXGB_ETH_LENGTH_OF_ADDRESS);
678 /* Initialize the hardware */
679 if (ixgb_hardware_init(adapter)) {
680 if_printf(ifp, "Unable to initialize the hardware\n");
683 ixgb_enable_vlans(adapter);
685 /* Prepare transmit descriptors and buffers */
686 if (ixgb_setup_transmit_structures(adapter)) {
687 if_printf(ifp, "Could not setup transmit structures\n");
691 ixgb_initialize_transmit_unit(adapter);
693 /* Setup Multicast table */
694 ixgb_set_multi(adapter);
696 /* Prepare receive descriptors and buffers */
697 if (ixgb_setup_receive_structures(adapter)) {
698 if_printf(ifp, "Could not setup receive structures\n");
702 ixgb_initialize_receive_unit(adapter);
704 /* Don't lose promiscuous settings */
705 ixgb_set_promisc(adapter);
/* Mark the interface running and ready to accept packets. */
708 ifp->if_drv_flags |= IFF_DRV_RUNNING;
709 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Advertise TX checksum offload to the stack only if enabled. */
712 if (ifp->if_capenable & IFCAP_TXCSUM)
713 ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
715 ifp->if_hwassist = 0;
718 /* Enable jumbo frames */
719 if (ifp->if_mtu > ETHERMTU) {
721 IXGB_WRITE_REG(&adapter->hw, MFS,
722 adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
723 temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
724 temp_reg |= IXGB_CTRL0_JFE;
725 IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
/* Re-arm the one-second stats/watchdog timer. */
727 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
728 ixgb_clear_hw_cntrs(&adapter->hw);
729 #ifdef DEVICE_POLLING
731 * Only disable interrupts if we are polling, make sure they are on
734 if (ifp->if_capenable & IFCAP_POLLING)
735 ixgb_disable_intr(adapter);
738 ixgb_enable_intr(adapter);
/*
 * if_init entry point (function header elided by extraction --
 * presumably `static void ixgb_init(void *arg)`): takes the adapter
 * lock (acquisition line also elided) and calls ixgb_init_locked().
 */
746 struct adapter *adapter = arg;
749 ixgb_init_locked(adapter);
750 IXGB_UNLOCK(adapter);
754 #ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING worker (lock held): on POLL_AND_CHECK_STATUS also read
 * ICR and refresh link state; then process up to `count` received
 * packets, reclaim TX descriptors, and restart transmission if the
 * send queue is non-empty.  Returns rx_npkts (return line elided).
 */
756 ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
758 struct adapter *adapter = ifp->if_softc;
762 IXGB_LOCK_ASSERT(adapter);
764 if (cmd == POLL_AND_CHECK_STATUS) {
765 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
766 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
767 ixgb_check_for_link(&adapter->hw);
768 ixgb_print_link_status(adapter);
771 rx_npkts = ixgb_process_receive_interrupts(adapter, count);
772 ixgb_clean_transmit_interrupts(adapter);
774 if (ifp->if_snd.ifq_head != NULL)
775 ixgb_start_locked(ifp);
/*
 * DEVICE_POLLING entry point registered via ether_poll_register():
 * takes the adapter lock (acquisition line elided), polls only while
 * the interface is RUNNING, and returns the packet count.
 */
780 ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
782 struct adapter *adapter = ifp->if_softc;
786 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
787 rx_npkts = ixgb_poll_locked(ifp, cmd, count);
788 IXGB_UNLOCK(adapter);
791 #endif /* DEVICE_POLLING */
793 /*********************************************************************
795 * Interrupt Service routine
797 **********************************************************************/
/*
 * Interrupt service routine (function header elided by extraction --
 * this is the body of `static void ixgb_intr(void *arg)`).  Reads and
 * acknowledges ICR, updates link state on RXSEQ/LSC, then loops up to
 * IXGB_MAX_INTR times cleaning RX and TX, and finally restarts the
 * transmit queue if work is pending.
 * NOTE(review): interior lines (ifp/reg_icr declarations, early
 * returns, rxdmt0 assignment, braces) were elided.
 */
802 u_int32_t loop_cnt = IXGB_MAX_INTR;
805 struct adapter *adapter = arg;
806 boolean_t rxdmt0 = FALSE;
/* When polling is active the ISR does nothing; poll() does the work. */
812 #ifdef DEVICE_POLLING
813 if (ifp->if_capenable & IFCAP_POLLING) {
814 IXGB_UNLOCK(adapter);
819 reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
821 IXGB_UNLOCK(adapter);
825 if (reg_icr & IXGB_INT_RXDMT0)
/* Optional per-cause interrupt statistics (guard elided). */
829 if (reg_icr & IXGB_INT_RXDMT0)
830 adapter->sv_stats.icr_rxdmt0++;
831 if (reg_icr & IXGB_INT_RXO)
832 adapter->sv_stats.icr_rxo++;
833 if (reg_icr & IXGB_INT_RXT0)
834 adapter->sv_stats.icr_rxt0++;
835 if (reg_icr & IXGB_INT_TXDW)
836 adapter->sv_stats.icr_TXDW++;
839 /* Link status change */
840 if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
841 ixgb_check_for_link(&adapter->hw);
842 ixgb_print_link_status(adapter);
844 while (loop_cnt > 0) {
845 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
846 ixgb_process_receive_interrupts(adapter, -1);
847 ixgb_clean_transmit_interrupts(adapter);
/* Re-arm the RX descriptor-minimum-threshold interrupt (RAIDC mode). */
852 if (rxdmt0 && adapter->raidc) {
853 IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
854 IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
856 if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL)
857 ixgb_start_locked(ifp);
859 IXGB_UNLOCK(adapter);
864 /*********************************************************************
866 * Media Ioctl callback
868 * This routine is called whenever the user queries the status of
869 * the interface using ifconfig.
871 **********************************************************************/
/*
 * ifmedia status callback: refresh link state and report it.  The only
 * media this hardware reports when up is fiber full-duplex (flagged
 * here as IFM_1000_SX|IFM_FDX).  Early return on !link_up elided.
 */
873 ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
875 struct adapter *adapter = ifp->if_softc;
877 INIT_DEBUGOUT("ixgb_media_status: begin");
879 ixgb_check_for_link(&adapter->hw);
880 ixgb_print_link_status(adapter);
882 ifmr->ifm_status = IFM_AVALID;
883 ifmr->ifm_active = IFM_ETHER;
885 if (!adapter->hw.link_up)
888 ifmr->ifm_status |= IFM_ACTIVE;
889 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
894 /*********************************************************************
896 * Media Ioctl callback
898 * This routine is called when the user changes speed/duplex using
899 * media/mediopt option with ifconfig.
901 **********************************************************************/
/*
 * ifmedia change callback: validate that the requested media type is
 * Ethernet.  (Error/success returns elided by extraction.)
 */
903 ixgb_media_change(struct ifnet * ifp)
905 struct adapter *adapter = ifp->if_softc;
906 struct ifmedia *ifm = &adapter->media;
908 INIT_DEBUGOUT("ixgb_media_change: begin");
910 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
916 /*********************************************************************
918 * This routine maps the mbufs to tx descriptors.
920 * return 0 on success, positive on failure
921 **********************************************************************/
/*
 * Map an outgoing mbuf chain into TX descriptors and hand it to the
 * hardware by bumping the TDT tail register.  Reclaims descriptors
 * first when availability falls below IXGB_TX_CLEANUP_THRESHOLD, sets
 * up checksum offload when if_hwassist is enabled, and inserts the
 * VLAN tag (source of the tag varies by __FreeBSD_version).
 * Returns 0 on success, non-zero on failure (return lines elided).
 *
 * FIX(review): the two `#elseif` directives below were invalid --
 * `#elseif` is not a C preprocessor directive, so inside the skipped
 * `#if __FreeBSD_version < 500000` group it was ignored and the
 * VLAN_OUTPUT_TAG branch (500000 <= version < 700000) was never
 * compiled.  Changed to the correct `#elif`.
 * NOTE(review): extraction dropped interior lines (map declaration,
 * returns, braces, #endif pairs); comments cover visible code only.
 */
924 ixgb_encap(struct adapter * adapter, struct mbuf * m_head)
927 int i, j, error, nsegs;
929 #if __FreeBSD_version < 500000
930 struct ifvlan *ifv = NULL;
932 bus_dma_segment_t segs[IXGB_MAX_SCATTER];
934 struct ixgb_buffer *tx_buffer = NULL;
935 struct ixgb_tx_desc *current_tx_desc = NULL;
936 struct ifnet *ifp = adapter->ifp;
939 * Force a cleanup if number of TX descriptors available hits the
942 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
943 ixgb_clean_transmit_interrupts(adapter);
/* Still starved after cleanup: count it and fail the encap. */
945 if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
946 adapter->no_tx_desc_avail1++;
950 * Map the packet for DMA.
952 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
953 adapter->no_tx_map_avail++;
956 error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
957 &nsegs, BUS_DMA_NOWAIT);
959 adapter->no_tx_dma_setup++;
960 if_printf(ifp, "ixgb_encap: bus_dmamap_load_mbuf failed; "
961 "error %u\n", error);
962 bus_dmamap_destroy(adapter->txtag, map);
965 KASSERT(nsegs != 0, ("ixgb_encap: empty packet"));
/* Not enough free descriptors for all DMA segments: back out. */
967 if (nsegs > adapter->num_tx_desc_avail) {
968 adapter->no_tx_desc_avail2++;
969 bus_dmamap_destroy(adapter->txtag, map);
972 if (ifp->if_hwassist > 0) {
973 ixgb_transmit_checksum_setup(adapter, m_head,
978 /* Find out if we are in vlan mode */
979 #if __FreeBSD_version < 500000
980 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
981 m_head->m_pkthdr.rcvif != NULL &&
982 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
983 ifv = m_head->m_pkthdr.rcvif->if_softc;
984 #elif __FreeBSD_version < 700000
985 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
/* Fill one descriptor per DMA segment, wrapping at ring end. */
987 i = adapter->next_avail_tx_desc;
988 for (j = 0; j < nsegs; j++) {
989 tx_buffer = &adapter->tx_buffer_area[i];
990 current_tx_desc = &adapter->tx_desc_base[i];
992 current_tx_desc->buff_addr = htole64(segs[j].ds_addr);
993 current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len);
994 current_tx_desc->popts = txd_popts;
995 if (++i == adapter->num_tx_desc)
998 tx_buffer->m_head = NULL;
1001 adapter->num_tx_desc_avail -= nsegs;
1002 adapter->next_avail_tx_desc = i;
/* VLAN tag source differs per FreeBSD version (see FIX note above). */
1004 #if __FreeBSD_version < 500000
1006 /* Set the vlan id */
1007 current_tx_desc->vlan = ifv->ifv_tag;
1008 #elif __FreeBSD_version < 700000
1010 /* Set the vlan id */
1011 current_tx_desc->vlan = VLAN_TAG_VALUE(mtag);
1013 if (m_head->m_flags & M_VLANTAG) {
1014 current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag;
1017 /* Tell hardware to add tag */
1018 current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
/* Only the last descriptor keeps a reference to the mbuf and map. */
1020 tx_buffer->m_head = m_head;
1021 tx_buffer->map = map;
1022 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1025 * Last Descriptor of Packet needs End Of Packet (EOP)
1027 current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP);
1030 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1031 * that this frame is available to transmit.
1033 IXGB_WRITE_REG(&adapter->hw, TDT, i);
/*
 * Apply promiscuous/allmulti interface flags to the RCTL register:
 * IFF_PROMISC sets both unicast (UPE) and multicast (MPE) promiscuous
 * bits; IFF_ALLMULTI sets only MPE.  (reg_rctl declaration and braces
 * elided by extraction.)
 */
1039 ixgb_set_promisc(struct adapter * adapter)
1043 struct ifnet *ifp = adapter->ifp;
1045 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1047 if (ifp->if_flags & IFF_PROMISC) {
1048 reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1049 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1050 } else if (ifp->if_flags & IFF_ALLMULTI) {
1051 reg_rctl |= IXGB_RCTL_MPE;
1052 reg_rctl &= ~IXGB_RCTL_UPE;
1053 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/*
 * Clear both promiscuous bits (unicast UPE and multicast MPE) in RCTL,
 * returning the receiver to normal filtering.
 */
1059 ixgb_disable_promisc(struct adapter * adapter)
1063 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1065 reg_rctl &= (~IXGB_RCTL_UPE);
1066 reg_rctl &= (~IXGB_RCTL_MPE);
1067 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1073 /*********************************************************************
1076 * This routine is called whenever multicast address list is updated.
1078 **********************************************************************/
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * address list.  If the list exceeds MAX_NUM_MULTICAST_ADDRESSES, fall
 * back to multicast-promiscuous (MPE); otherwise program the exact
 * list via ixgb_mc_addr_list_update().
 * NOTE(review): mta/mcnt declarations, continue/increment lines and
 * braces were elided by extraction.
 */
1081 ixgb_set_multi(struct adapter * adapter)
1083 u_int32_t reg_rctl = 0;
1085 struct ifmultiaddr *ifma;
1087 struct ifnet *ifp = adapter->ifp;
1089 IOCTL_DEBUGOUT("ixgb_set_multi: begin");
1092 bzero(mta, sizeof(u_int8_t) * IXGB_ETH_LENGTH_OF_ADDRESS *
1093 MAX_NUM_MULTICAST_ADDRESSES);
/* Walk the address list under the maddr lock, copying AF_LINK entries. */
1095 if_maddr_rlock(ifp);
1096 #if __FreeBSD_version < 500000
1097 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1099 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1101 if (ifma->ifma_addr->sa_family != AF_LINK)
1104 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1105 &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS);
1108 if_maddr_runlock(ifp);
1110 if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) {
1111 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1112 reg_rctl |= IXGB_RCTL_MPE;
1113 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1115 ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0);
1121 /*********************************************************************
1124 * This routine checks for link status and updates statistics.
1126 **********************************************************************/
/*
 * One-second callout (armed in ixgb_init_locked / rearmed below):
 * refreshes link state and statistics, optionally dumps debug stats,
 * counts down the TX watchdog timer and fires ixgb_watchdog() when it
 * expires.  Runs with the adapter mutex held (callout_init_mtx).
 */
1129 ixgb_local_timer(void *arg)
1132 struct adapter *adapter = arg;
1135 IXGB_LOCK_ASSERT(adapter);
1137 ixgb_check_for_link(&adapter->hw);
1138 ixgb_print_link_status(adapter);
1139 ixgb_update_stats_counters(adapter);
1140 if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) {
1141 ixgb_print_hw_stats(adapter);
/* Software TX watchdog: fires only when tx_timer counts down to 0. */
1143 if (adapter->tx_timer != 0 && --adapter->tx_timer == 0)
1144 ixgb_watchdog(adapter);
1145 callout_reset(&adapter->timer, hz, ixgb_local_timer, adapter);
/*
 * Log link transitions: prints only on a change of state, tracked by
 * adapter->link_active (1 = up message already printed, 0 = down).
 * (Braces and the printf speed/duplex arguments were elided.)
 */
1149 ixgb_print_link_status(struct adapter * adapter)
1151 if (adapter->hw.link_up) {
1152 if (!adapter->link_active) {
1153 if_printf(adapter->ifp, "Link is up %d Mbps %s \n",
1156 adapter->link_active = 1;
1159 if (adapter->link_active) {
1160 if_printf(adapter->ifp, "Link is Down \n");
1161 adapter->link_active = 0;
1170 /*********************************************************************
1172 * This routine disables all traffic on the adapter by issuing a
1173 * global reset on the MAC and deallocates TX/RX buffers.
1175 **********************************************************************/
/*
 * Stop all traffic: mask interrupts, issue a global MAC reset via
 * ixgb_adapter_stop(), cancel the timer, free TX/RX buffers, and clear
 * the RUNNING/OACTIVE flags so the stack stops handing us packets.
 * Caller must hold the adapter lock (asserted below).
 */
1178 ixgb_stop(void *arg)
1181 struct adapter *adapter = arg;
1184 IXGB_LOCK_ASSERT(adapter);
1186 INIT_DEBUGOUT("ixgb_stop: begin\n");
1187 ixgb_disable_intr(adapter);
/* Clear the stopped flag first so ixgb_adapter_stop() does a full reset. */
1188 adapter->hw.adapter_stopped = FALSE;
1189 ixgb_adapter_stop(&adapter->hw);
1190 callout_stop(&adapter->timer);
1191 ixgb_free_transmit_structures(adapter);
1192 ixgb_free_receive_structures(adapter);
1194 /* Tell the stack that the interface is no longer active */
1195 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1196 adapter->tx_timer = 0;
1202 /*********************************************************************
1204 * Determine hardware revision.
1206 **********************************************************************/
/*
 * Read PCI config identification (vendor/device/revision/subsystem)
 * into the shared hw struct, enable bus mastering, and set mac_type
 * for the supported 82597EX variants; complains on unknown IDs.
 * (break/default labels and closing braces elided by extraction.)
 */
1208 ixgb_identify_hardware(struct adapter * adapter)
1210 device_t dev = adapter->dev;
1212 /* Make sure our PCI config space has the necessary stuff set */
1213 pci_enable_busmaster(dev);
1214 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1216 /* Save off the information about this board */
1217 adapter->hw.vendor_id = pci_get_vendor(dev);
1218 adapter->hw.device_id = pci_get_device(dev);
1219 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1220 adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
1221 adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
1223 /* Set MacType, etc. based on this PCI info */
1224 switch (adapter->hw.device_id) {
1225 case IXGB_DEVICE_ID_82597EX:
1226 case IXGB_DEVICE_ID_82597EX_SR:
1227 adapter->hw.mac_type = ixgb_82597;
1230 INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
1231 device_printf(dev, "unsupported device id 0x%x\n",
1232 adapter->hw.device_id);
/*
 * Allocate the PCI resources the driver needs: the memory-mapped
 * register BAR and a (shareable) interrupt, then hook up the
 * interrupt handler.  Returns nonzero on failure (error paths are
 * outside the visible lines).
 */
1239 ixgb_allocate_pci_resources(struct adapter * adapter)
1242 device_t dev = adapter->dev;
/* Map the device register BAR. */
1245 adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
1248 if (!(adapter->res_memory)) {
1249 device_printf(dev, "Unable to allocate bus resource: memory\n");
1252 adapter->osdep.mem_bus_space_tag =
1253 rman_get_bustag(adapter->res_memory);
1254 adapter->osdep.mem_bus_space_handle =
1255 rman_get_bushandle(adapter->res_memory);
/* Shared code accesses registers through this opaque handle pointer. */
1256 adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;
1259 adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
1261 RF_SHAREABLE | RF_ACTIVE);
1262 if (!(adapter->res_interrupt)) {
1264 "Unable to allocate bus resource: interrupt\n");
/* Register ixgb_intr as an MPSAFE network interrupt handler. */
1267 if (bus_setup_intr(dev, adapter->res_interrupt,
1268 INTR_TYPE_NET | INTR_MPSAFE,
1269 NULL, (void (*) (void *))ixgb_intr, adapter,
1270 &adapter->int_handler_tag)) {
1271 device_printf(dev, "Error registering interrupt handler!\n");
/* Give the shared code a back-pointer to the OS-dependent state. */
1274 adapter->hw.back = &adapter->osdep;
/*
 * Release everything ixgb_allocate_pci_resources() acquired:
 * interrupt handler + IRQ, register BAR, and (if present) the
 * I/O port resource.  Each release is guarded so this is safe to
 * call on a partially-initialized adapter.
 */
1280 ixgb_free_pci_resources(struct adapter * adapter)
1282 device_t dev = adapter->dev;
1284 if (adapter->res_interrupt != NULL) {
1285 bus_teardown_intr(dev, adapter->res_interrupt,
1286 adapter->int_handler_tag);
1287 bus_release_resource(dev, SYS_RES_IRQ, 0,
1288 adapter->res_interrupt);
1290 if (adapter->res_memory != NULL) {
1291 bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
1292 adapter->res_memory);
1294 if (adapter->res_ioport != NULL) {
1295 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
1296 adapter->res_ioport);
1301 /*********************************************************************
1303 * Initialize the hardware to a configuration as specified by the
1304 * adapter structure. The controller is reset, the EEPROM is
1305 * verified, the MAC address is set, then the shared initialization
1306 * routines are called.
1307 *
1307 * Returns nonzero on failure (return statements are outside the
1307 * visible lines).
1308 **********************************************************************/
1310 ixgb_hardware_init(struct adapter * adapter)
1312 /* Issue a global reset */
/* Clear adapter_stopped so ixgb_adapter_stop() performs the reset. */
1313 adapter->hw.adapter_stopped = FALSE;
1314 ixgb_adapter_stop(&adapter->hw);
1316 /* Make sure we have a good EEPROM before we read from it */
1317 if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
1318 device_printf(adapter->dev,
1319 "The EEPROM Checksum Is Not Valid\n");
1322 if (!ixgb_init_hw(&adapter->hw)) {
1323 device_printf(adapter->dev, "Hardware Initialization Failed");
1330 /*********************************************************************
1332 * Setup networking device structure and register an interface.
1333 *
1333 * Allocates the ifnet, fills in driver entry points and capability
1333 * flags, attaches the Ethernet layer and registers supported media.
1334 **********************************************************************/
1336 ixgb_setup_interface(device_t dev, struct adapter * adapter)
1339 INIT_DEBUGOUT("ixgb_setup_interface: begin");
1341 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1343 device_printf(dev, "can not allocate ifnet structure\n");
/* Interface naming differs across FreeBSD versions. */
1346 #if __FreeBSD_version >= 502000
1347 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1349 ifp->if_unit = device_get_unit(dev);
1350 ifp->if_name = "ixgb";
/* 10 Gb device, but if_baudrate is set to 1 Gb/s here -- NOTE(review):
 * looks understated for an 82597EX; confirm intent. */
1352 ifp->if_baudrate = 1000000000;
1353 ifp->if_init = ixgb_init;
1354 ifp->if_softc = adapter;
1355 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1356 ifp->if_ioctl = ixgb_ioctl;
1357 ifp->if_start = ixgb_start;
/* Bound the software send queue by the TX descriptor ring size. */
1358 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1360 #if __FreeBSD_version < 500000
1361 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1363 ether_ifattach(ifp, adapter->hw.curr_mac_addr);
/* Hardware TX/RX checksum offload is always advertised. */
1366 ifp->if_capabilities = IFCAP_HWCSUM;
1369 * Tell the upper layer(s) we support long frames.
1371 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1373 #if __FreeBSD_version >= 500000
1374 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
/* Enable everything we advertised by default. */
1377 ifp->if_capenable = ifp->if_capabilities;
1379 #ifdef DEVICE_POLLING
1380 ifp->if_capabilities |= IFCAP_POLLING;
1384 * Specify the media types supported by this adapter and register
1385 * callbacks to update media and link information
1387 ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
1389 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1391 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1393 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1394 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1399 /********************************************************************
1400 * Manage DMA'able memory.
1401 *******************************************************************/
/*
 * bus_dmamap_load() callback: record the bus address of the first
 * segment in the caller-supplied bus_addr_t.  Only a single segment
 * is expected here.
 */
1403 ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1407 *(bus_addr_t *) arg = segs->ds_addr;
/*
 * Allocate a contiguous, page-aligned DMA-able region of 'size' bytes:
 * create a single-segment tag, allocate backing memory, and load the
 * map to obtain the bus address (via ixgb_dmamap_cb).  On failure the
 * goto-style cleanup below tears down whatever was created.
 */
1412 ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
1413 struct ixgb_dma_alloc * dma, int mapflags)
1419 r = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1420 PAGE_SIZE, 0, /* alignment, bounds */
1421 BUS_SPACE_MAXADDR, /* lowaddr */
1422 BUS_SPACE_MAXADDR, /* highaddr */
1423 NULL, NULL, /* filter, filterarg */
1426 size, /* maxsegsize */
1427 BUS_DMA_ALLOCNOW, /* flags */
1428 #if __FreeBSD_version >= 502000
1429 NULL, /* lockfunc */
1430 NULL, /* lockfuncarg */
1434 device_printf(dev, "ixgb_dma_malloc: bus_dma_tag_create failed; "
1438 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1439 BUS_DMA_NOWAIT, &dma->dma_map);
1441 device_printf(dev, "ixgb_dma_malloc: bus_dmamem_alloc failed; "
/* Load the map; ixgb_dmamap_cb stores the bus address into dma_paddr. */
1445 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1449 mapflags | BUS_DMA_NOWAIT);
1451 device_printf(dev, "ixgb_dma_malloc: bus_dmamap_load failed; "
1455 dma->dma_size = size;
/* --- error unwind: free memory, destroy tag, clear handles --- */
1458 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1460 bus_dma_tag_destroy(dma->dma_tag);
1462 dma->dma_map = NULL;
1463 dma->dma_tag = NULL;
/* Release a DMA region created by ixgb_dma_malloc(): unload, free, destroy. */
1470 ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
1472 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1473 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1474 bus_dma_tag_destroy(dma->dma_tag);
1477 /*********************************************************************
1479 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1480 * the information needed to transmit a packet on the wire.
1481 *
1481 * Returns nonzero if the allocation fails.
1482 **********************************************************************/
1484 ixgb_allocate_transmit_structures(struct adapter * adapter)
1486 if (!(adapter->tx_buffer_area =
1487 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1488 adapter->num_tx_desc, M_DEVBUF,
1489 M_NOWAIT | M_ZERO))) {
1490 device_printf(adapter->dev,
1491 "Unable to allocate tx_buffer memory\n")
1494 bzero(adapter->tx_buffer_area,
/* NOTE(review): bzero is redundant -- malloc above already used M_ZERO. */
1495 sizeof(struct ixgb_buffer) * adapter->num_tx_desc);
1500 /*********************************************************************
1502 * Allocate and initialize transmit structures.
1503 *
1503 * Creates the TX DMA tag (up to IXGB_MAX_SCATTER segments of at most
1503 * MCLBYTES each), allocates the tx_buffer array, zeroes the ring and
1503 * resets the ring bookkeeping.
1504 **********************************************************************/
1506 ixgb_setup_transmit_structures(struct adapter * adapter)
1509 * Setup DMA descriptor areas.
1511 if (bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1512 PAGE_SIZE, 0, /* alignment, bounds */
1513 BUS_SPACE_MAXADDR, /* lowaddr */
1514 BUS_SPACE_MAXADDR, /* highaddr */
1515 NULL, NULL, /* filter, filterarg */
1516 MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */
1517 IXGB_MAX_SCATTER, /* nsegments */
1518 MCLBYTES, /* maxsegsize */
1519 BUS_DMA_ALLOCNOW, /* flags */
1520 #if __FreeBSD_version >= 502000
1521 NULL, /* lockfunc */
1522 NULL, /* lockfuncarg */
1525 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1528 if (ixgb_allocate_transmit_structures(adapter))
/* Clear the descriptor ring before handing it to the hardware. */
1531 bzero((void *)adapter->tx_desc_base,
1532 (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc);
1534 adapter->next_avail_tx_desc = 0;
1535 adapter->oldest_used_tx_desc = 0;
1537 /* Set number of descriptors available */
1538 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1540 /* Set checksum context */
1541 adapter->active_checksum_context = OFFLOAD_NONE;
1546 /*********************************************************************
1548 * Enable transmit unit.
1549 *
1549 * Programs the TX descriptor ring base/length, head/tail pointers,
1549 * interrupt delay, and the transmit control register.
1550 **********************************************************************/
1552 ixgb_initialize_transmit_unit(struct adapter * adapter)
1555 u_int64_t tdba = adapter->txdma.dma_paddr;
1557 /* Setup the Base and Length of the Tx Descriptor Ring */
1558 IXGB_WRITE_REG(&adapter->hw, TDBAL,
1559 (tdba & 0x00000000ffffffffULL));
1560 IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1561 IXGB_WRITE_REG(&adapter->hw, TDLEN,
1562 adapter->num_tx_desc *
1563 sizeof(struct ixgb_tx_desc));
1565 /* Setup the HW Tx Head and Tail descriptor pointers */
1566 IXGB_WRITE_REG(&adapter->hw, TDH, 0);
1567 IXGB_WRITE_REG(&adapter->hw, TDT, 0);
1570 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1571 IXGB_READ_REG(&adapter->hw, TDBAL),
1572 IXGB_READ_REG(&adapter->hw, TDLEN));
/* TX interrupt delay value (TIDV). */
1574 IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1577 /* Program the Transmit Control Register */
/* NOTE(review): the value read here is immediately overwritten by the
 * assignment on the next line -- the read is dead. */
1578 reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
1579 reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1580 IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1582 /* Setup Transmit Descriptor Settings for this adapter */
1583 adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
/* Ask for delayed interrupts on TX descriptors if a delay is configured. */
1585 if (adapter->tx_int_delay > 0)
1586 adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1590 /*********************************************************************
1592 * Free all transmit related data structures.
1593 *
1593 * Unloads/destroys per-buffer DMA maps, frees pending mbufs, then
1593 * releases the tx_buffer array and the TX DMA tag.
1594 **********************************************************************/
1596 ixgb_free_transmit_structures(struct adapter * adapter)
1598 struct ixgb_buffer *tx_buffer;
1601 INIT_DEBUGOUT("free_transmit_structures: begin");
1603 if (adapter->tx_buffer_area != NULL) {
1604 tx_buffer = adapter->tx_buffer_area;
1605 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1606 if (tx_buffer->m_head != NULL) {
1607 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1608 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1609 m_freem(tx_buffer->m_head);
1611 tx_buffer->m_head = NULL;
/* NOTE(review): this second NULL check repeats the one above. */
1614 if (adapter->tx_buffer_area != NULL) {
1615 free(adapter->tx_buffer_area, M_DEVBUF);
1616 adapter->tx_buffer_area = NULL;
1618 if (adapter->txtag != NULL) {
1619 bus_dma_tag_destroy(adapter->txtag);
1620 adapter->txtag = NULL;
1625 /*********************************************************************
1627 * The offload context needs to be set when we transfer the first
1628 * packet of a particular protocol (TCP/UDP). We change the
1629 * context only if the protocol type changes.
1630 *
1630 * Sets *txd_popts to the per-packet option bits; when the context is
1630 * already current the function returns early (returns not visible).
1631 **********************************************************************/
1633 ixgb_transmit_checksum_setup(struct adapter * adapter,
1635 u_int8_t * txd_popts)
1637 struct ixgb_context_desc *TXD;
1638 struct ixgb_buffer *tx_buffer;
1641 if (mp->m_pkthdr.csum_flags) {
1643 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
1644 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
/* Context already programmed for TCP/IP -- nothing more to do. */
1645 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
1648 adapter->active_checksum_context = OFFLOAD_TCP_IP;
1649 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
1650 *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1651 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
1654 adapter->active_checksum_context = OFFLOAD_UDP_IP;
1665 * If we reach this point, the checksum offload context needs to be
1668 curr_txd = adapter->next_avail_tx_desc;
1669 tx_buffer = &adapter->tx_buffer_area[curr_txd];
/* A context descriptor occupies a slot in the regular TX ring. */
1670 TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];
/* Checksum start: just past the Ethernet + IPv4 headers. */
1673 TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
/* Checksum-insert offset points at the protocol checksum field. */
1678 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
1680 ENET_HEADER_SIZE + sizeof(struct ip) +
1681 offsetof(struct tcphdr, th_sum);
1682 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
1684 ENET_HEADER_SIZE + sizeof(struct ip) +
1685 offsetof(struct udphdr, uh_sum);
1687 TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE;
/* No mbuf is associated with a context descriptor. */
1689 tx_buffer->m_head = NULL;
1691 if (++curr_txd == adapter->num_tx_desc)
/* The context descriptor consumes one ring slot. */
1694 adapter->num_tx_desc_avail--;
1695 adapter->next_avail_tx_desc = curr_txd;
1699 /**********************************************************************
1701 * Examine each tx_buffer in the used queue. If the hardware is done
1702 * processing the packet then free associated resources. The
1703 * tx_buffer is put back on the free queue.
1704 *
1704 * Caller must hold the adapter lock.
1705 **********************************************************************/
1707 ixgb_clean_transmit_interrupts(struct adapter * adapter)
1710 struct ixgb_buffer *tx_buffer;
1711 struct ixgb_tx_desc *tx_desc;
1713 IXGB_LOCK_ASSERT(adapter);
/* Ring already fully reclaimed -- nothing to do. */
1715 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
1719 adapter->clean_tx_interrupts++;
1721 num_avail = adapter->num_tx_desc_avail;
1722 i = adapter->oldest_used_tx_desc;
1724 tx_buffer = &adapter->tx_buffer_area[i];
1725 tx_desc = &adapter->tx_desc_base[i];
/* Walk forward while the hardware has set the Descriptor Done bit. */
1727 while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
1729 tx_desc->status = 0;
1732 if (tx_buffer->m_head) {
1733 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
1734 BUS_DMASYNC_POSTWRITE);
1735 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1736 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1737 m_freem(tx_buffer->m_head);
1738 tx_buffer->m_head = NULL;
1740 if (++i == adapter->num_tx_desc)
1743 tx_buffer = &adapter->tx_buffer_area[i];
1744 tx_desc = &adapter->tx_desc_base[i];
1747 adapter->oldest_used_tx_desc = i;
1750 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
1751 * it is OK to send packets. If there are no pending descriptors,
1752 * clear the timeout. Otherwise, if some descriptors have been freed,
1753 * restart the timeout.
1755 if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
1756 struct ifnet *ifp = adapter->ifp;
1758 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1759 if (num_avail == adapter->num_tx_desc)
1760 adapter->tx_timer = 0;
1761 else if (num_avail == adapter->num_tx_desc_avail)
1762 adapter->tx_timer = IXGB_TX_TIMEOUT;
1764 adapter->num_tx_desc_avail = num_avail;
1769 /*********************************************************************
1771 * Get a buffer from system mbuf buffer pool.
1772 *
1772 * Populates RX descriptor slot 'i': reuses 'nmp' if the caller passed
1772 * one, otherwise allocates a fresh cluster mbuf, then DMA-loads it and
1772 * writes its bus address into the descriptor.  Returns ENOBUFS on
1772 * allocation failure (returns not visible in this listing).
1773 **********************************************************************/
1775 ixgb_get_buf(int i, struct adapter * adapter,
1778 register struct mbuf *mp = nmp;
1779 struct ixgb_buffer *rx_buffer;
/* No recycled mbuf supplied: allocate a new packet-header cluster. */
1788 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1791 adapter->mbuf_alloc_failed++;
1794 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* Reused-mbuf path: reset the length and data pointer to the
 * cluster start (the branch structure is not fully visible). */
1796 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
1797 mp->m_data = mp->m_ext.ext_buf;
/* For standard MTUs, align the IP header on a natural boundary. */
1801 if (ifp->if_mtu <= ETHERMTU) {
1802 m_adj(mp, ETHER_ALIGN);
1804 rx_buffer = &adapter->rx_buffer_area[i];
1807 * Using memory from the mbuf cluster pool, invoke the bus_dma
1808 * machinery to arrange the memory mapping.
1810 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
1811 mtod(mp, void *), mp->m_len,
1812 ixgb_dmamap_cb, &paddr, 0);
1817 rx_buffer->m_head = mp;
1818 adapter->rx_desc_base[i].buff_addr = htole64(paddr);
1819 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
1824 /*********************************************************************
1826 * Allocate memory for rx_buffer structures. Since we use one
1827 * rx_buffer per received packet, the maximum number of rx_buffer's
1828 * that we'll need is equal to the number of receive descriptors
1829 * that we've allocated.
1830 *
1830 * Also creates the RX DMA tag and per-buffer maps, and pre-fills the
1830 * ring with mbufs via ixgb_get_buf().  On failure, the unwind at the
1830 * bottom releases the tag and buffer array.
1831 **********************************************************************/
1833 ixgb_allocate_receive_structures(struct adapter * adapter)
1836 struct ixgb_buffer *rx_buffer;
1838 if (!(adapter->rx_buffer_area =
1839 (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) *
1840 adapter->num_rx_desc, M_DEVBUF,
1841 M_NOWAIT | M_ZERO))) {
1842 device_printf(adapter->dev,
1843 "Unable to allocate rx_buffer memory\n");
1846 bzero(adapter->rx_buffer_area,
/* NOTE(review): bzero is redundant -- malloc above already used M_ZERO. */
1847 sizeof(struct ixgb_buffer) * adapter->num_rx_desc);
/* One cluster-sized segment per RX buffer. */
1849 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),/* parent */
1850 PAGE_SIZE, 0, /* alignment, bounds */
1851 BUS_SPACE_MAXADDR, /* lowaddr */
1852 BUS_SPACE_MAXADDR, /* highaddr */
1853 NULL, NULL, /* filter, filterarg */
1854 MCLBYTES, /* maxsize */
1856 MCLBYTES, /* maxsegsize */
1857 BUS_DMA_ALLOCNOW, /* flags */
1858 #if __FreeBSD_version >= 502000
1859 NULL, /* lockfunc */
1860 NULL, /* lockfuncarg */
1864 device_printf(adapter->dev, "ixgb_allocate_receive_structures: "
1865 "bus_dma_tag_create failed; error %u\n",
/* Create a DMA map for each receive buffer slot. */
1869 rx_buffer = adapter->rx_buffer_area;
1870 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
1871 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
1874 device_printf(adapter->dev,
1875 "ixgb_allocate_receive_structures: "
1876 "bus_dmamap_create failed; error %u\n",
/* Pre-populate every descriptor with a fresh mbuf cluster. */
1882 for (i = 0; i < adapter->num_rx_desc; i++) {
1883 if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) {
1884 adapter->rx_buffer_area[i].m_head = NULL;
1885 adapter->rx_desc_base[i].buff_addr = 0;
/* --- error unwind --- */
1892 bus_dma_tag_destroy(adapter->rxtag);
1894 adapter->rxtag = NULL;
1895 free(adapter->rx_buffer_area, M_DEVBUF);
1896 adapter->rx_buffer_area = NULL;
1900 /*********************************************************************
1902 * Allocate and initialize receive structures.
1903 *
1903 * Zeroes the RX descriptor ring, allocates buffers/maps, and resets
1903 * the software ring indices.
1904 **********************************************************************/
1906 ixgb_setup_receive_structures(struct adapter * adapter)
1908 bzero((void *)adapter->rx_desc_base,
1909 (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);
1911 if (ixgb_allocate_receive_structures(adapter))
1914 /* Setup our descriptor pointers */
1915 adapter->next_rx_desc_to_check = 0;
1916 adapter->next_rx_desc_to_use = 0;
1920 /*********************************************************************
1922 * Enable receive unit.
1923 *
1923 * Programs the RX ring base/length/head/tail, interrupt moderation
1923 * (RDTR/RXDCTL/RAIDC), optional RX checksum offload, buffer size,
1923 * and finally turns the receiver on via RCTL.
1924 **********************************************************************/
1926 ixgb_initialize_receive_unit(struct adapter * adapter)
1929 u_int32_t reg_rxcsum;
1930 u_int32_t reg_rxdctl;
1932 u_int64_t rdba = adapter->rxdma.dma_paddr;
1937 * Make sure receives are disabled while setting up the descriptor
1940 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1941 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);
1943 /* Set the Receive Delay Timer Register */
1944 IXGB_WRITE_REG(&adapter->hw, RDTR,
1945 adapter->rx_int_delay);
1948 /* Setup the Base and Length of the Rx Descriptor Ring */
1949 IXGB_WRITE_REG(&adapter->hw, RDBAL,
1950 (rdba & 0x00000000ffffffffULL));
1951 IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1952 IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
1953 sizeof(struct ixgb_rx_desc));
1955 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1956 IXGB_WRITE_REG(&adapter->hw, RDH, 0);
/* Tail starts one behind head: the whole ring is available to hardware. */
1958 IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
/* RX descriptor write-back thresholds. */
1962 reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
1963 | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
1964 | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
1965 IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);
/* Optional adaptive interrupt moderation (RAIDC). */
1969 if (adapter->raidc) {
1971 uint8_t poll_threshold;
1972 #define IXGB_RAIDC_POLL_DEFAULT 120
/* Derive the highwater threshold from the ring size, capped to 6 bits. */
1974 poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
1975 poll_threshold >>= 1;
1976 poll_threshold &= 0x3F;
1977 raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
1978 (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
1979 (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
1981 IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
1983 /* Enable Receive Checksum Offload for TCP and UDP ? */
1984 if (ifp->if_capenable & IFCAP_RXCSUM) {
1985 reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
1986 reg_rxcsum |= IXGB_RXCSUM_TUOFL;
1987 IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
1989 /* Setup the Receive Control Register */
1990 reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
1991 reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
1992 reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
1994 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
/* Select the hardware buffer-size field matching rx_buffer_len. */
1996 switch (adapter->rx_buffer_len) {
1998 case IXGB_RXBUFFER_2048:
1999 reg_rctl |= IXGB_RCTL_BSIZE_2048;
2001 case IXGB_RXBUFFER_4096:
2002 reg_rctl |= IXGB_RCTL_BSIZE_4096;
2004 case IXGB_RXBUFFER_8192:
2005 reg_rctl |= IXGB_RCTL_BSIZE_8192;
2007 case IXGB_RXBUFFER_16384:
2008 reg_rctl |= IXGB_RCTL_BSIZE_16384;
2012 reg_rctl |= IXGB_RCTL_RXEN;
2015 /* Enable Receives */
2016 IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2021 /*********************************************************************
2023 * Free receive related data structures.
2024 *
2024 * Mirrors ixgb_free_transmit_structures(): unload/destroy per-buffer
2024 * DMA maps, free mbufs, then release the buffer array and the RX tag.
2025 **********************************************************************/
2027 ixgb_free_receive_structures(struct adapter * adapter)
2029 struct ixgb_buffer *rx_buffer;
2032 INIT_DEBUGOUT("free_receive_structures: begin");
2034 if (adapter->rx_buffer_area != NULL) {
2035 rx_buffer = adapter->rx_buffer_area;
2036 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2037 if (rx_buffer->map != NULL) {
2038 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2039 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2041 if (rx_buffer->m_head != NULL)
2042 m_freem(rx_buffer->m_head);
2043 rx_buffer->m_head = NULL;
/* NOTE(review): this second NULL check repeats the one above. */
2046 if (adapter->rx_buffer_area != NULL) {
2047 free(adapter->rx_buffer_area, M_DEVBUF);
2048 adapter->rx_buffer_area = NULL;
2050 if (adapter->rxtag != NULL) {
2051 bus_dma_tag_destroy(adapter->rxtag);
2052 adapter->rxtag = NULL;
2057 /*********************************************************************
2059 * This routine executes in interrupt context. It replenishes
2060 * the mbufs in the descriptor and sends data which has been
2061 * dma'ed into host memory to upper layer.
2062 *
2063 * We loop at most count times if count is > 0, or until done if
2064 * count is negative.  Caller holds the adapter lock; it is dropped
2064 * around if_input() and reacquired (reacquire not visible here).
2066 *********************************************************************/
2068 ixgb_process_receive_interrupts(struct adapter * adapter, int count)
2072 #if __FreeBSD_version < 500000
2073 struct ether_header *eh;
2077 u_int8_t accept_frame = 0;
2079 int next_to_use = 0;
2082 /* Pointer to the receive descriptor being examined. */
2083 struct ixgb_rx_desc *current_desc;
2085 IXGB_LOCK_ASSERT(adapter);
2088 i = adapter->next_rx_desc_to_check;
2089 next_to_use = adapter->next_rx_desc_to_use;
2090 eop_desc = adapter->next_rx_desc_to_check;
2091 current_desc = &adapter->rx_desc_base[i];
/* Nothing written back by the hardware yet. */
2093 if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
2095 adapter->no_pkts_avail++;
/* Consume completed descriptors until the budget or ring is exhausted. */
2099 while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {
2101 mp = adapter->rx_buffer_area[i].m_head;
2102 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2103 BUS_DMASYNC_POSTREAD);
2105 if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
2111 len = current_desc->length;
/* Drop frames with any hardware-reported receive error. */
2113 if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2114 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2115 IXGB_RX_DESC_ERRORS_RXE)) {
2120 /* Assign correct length to the current fragment */
2123 if (adapter->fmp == NULL) {
2124 mp->m_pkthdr.len = len;
2125 adapter->fmp = mp; /* Store the first mbuf */
2128 /* Chain mbuf's together */
2129 mp->m_flags &= ~M_PKTHDR;
2130 adapter->lmp->m_next = mp;
2131 adapter->lmp = adapter->lmp->m_next;
2132 adapter->fmp->m_pkthdr.len += len;
2137 adapter->fmp->m_pkthdr.rcvif = ifp;
/* Pre-5.x stack: strip the Ethernet header ourselves. */
2139 #if __FreeBSD_version < 500000
2140 eh = mtod(adapter->fmp, struct ether_header *);
2142 /* Remove ethernet header from mbuf */
2143 m_adj(adapter->fmp, sizeof(struct ether_header));
2144 ixgb_receive_checksum(adapter, current_desc,
2147 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2148 VLAN_INPUT_TAG(eh, adapter->fmp,
2149 current_desc->special);
2151 ether_input(ifp, eh, adapter->fmp);
2153 ixgb_receive_checksum(adapter, current_desc,
2155 #if __FreeBSD_version < 700000
2156 if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
2157 VLAN_INPUT_TAG(ifp, adapter->fmp,
2158 current_desc->special);
/* 7.x+: tag the mbuf; the stack demultiplexes VLANs itself. */
2160 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
2161 adapter->fmp->m_pkthdr.ether_vtag =
2162 current_desc->special;
2163 adapter->fmp->m_flags |= M_VLANTAG;
/* Hand the completed chain to the stack without holding our lock. */
2167 if (adapter->fmp != NULL) {
2168 IXGB_UNLOCK(adapter);
2169 (*ifp->if_input) (ifp, adapter->fmp);
2174 adapter->fmp = NULL;
2175 adapter->lmp = NULL;
2177 adapter->rx_buffer_area[i].m_head = NULL;
/* Error path: discard any partially assembled chain. */
2179 adapter->dropped_pkts++;
2180 if (adapter->fmp != NULL)
2181 m_freem(adapter->fmp);
2182 adapter->fmp = NULL;
2183 adapter->lmp = NULL;
2186 /* Zero out the receive descriptors status */
2187 current_desc->status = 0;
2189 /* Advance our pointers to the next descriptor */
2190 if (++i == adapter->num_rx_desc) {
2192 current_desc = adapter->rx_desc_base;
2196 adapter->next_rx_desc_to_check = i;
2199 i = (adapter->num_rx_desc - 1);
2202 * 82597EX: Workaround for redundant write back in receive descriptor ring (causes
2203 * memory corruption). Avoid using and re-submitting the most recently received RX
2204 * descriptor back to hardware.
2206 * if(Last written back descriptor == EOP bit set descriptor)
2207 * then avoid re-submitting the most recently received RX descriptor
2209 * if(Last written back descriptor != EOP bit set descriptor)
2210 * then avoid re-submitting the most recently received RX descriptors
2211 * till last EOP bit set descriptor.
2213 if (eop_desc != i) {
2214 if (++eop_desc == adapter->num_rx_desc)
2218 /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
2219 while (next_to_use != i) {
2220 current_desc = &adapter->rx_desc_base[next_to_use];
/* Recycle the existing mbuf for slots that had receive errors. */
2221 if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
2222 IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
2223 IXGB_RX_DESC_ERRORS_RXE))) {
2224 mp = adapter->rx_buffer_area[next_to_use].m_head;
2225 ixgb_get_buf(next_to_use, adapter, mp);
2227 if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
2230 /* Advance our pointers to the next descriptor */
2231 if (++next_to_use == adapter->num_rx_desc) {
2233 current_desc = adapter->rx_desc_base;
2237 adapter->next_rx_desc_to_use = next_to_use;
/* Tail must lag one behind the next slot to use. */
2238 if (--next_to_use < 0)
2239 next_to_use = (adapter->num_rx_desc - 1);
2240 /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
2241 IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
2246 /*********************************************************************
2248 * Verify that the hardware indicated that the checksum is valid.
2249 * Inform the stack about the status of checksum so that stack
2250 * doesn't spend time verifying the checksum.
2251 *
2251 * Translates the RX descriptor's status/error bits into the mbuf's
2251 * csum_flags.
2252 *********************************************************************/
2254 ixgb_receive_checksum(struct adapter * adapter,
2255 struct ixgb_rx_desc * rx_desc,
/* IXSM set: hardware ignored the checksum; report nothing to the stack. */
2258 if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
2259 mp->m_pkthdr.csum_flags = 0;
2262 if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
2264 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
2265 /* IP Checksum Good */
2266 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2267 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
/* IP checksum error: clear any flags. */
2270 mp->m_pkthdr.csum_flags = 0;
2273 if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
2275 if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
/* TCP/UDP checksum validated by hardware. */
2276 mp->m_pkthdr.csum_flags |=
2277 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2278 mp->m_pkthdr.csum_data = htons(0xffff);
/* Turn on VLAN tag stripping/insertion (VME bit in CTRL0). */
2286 ixgb_enable_vlans(struct adapter * adapter)
2290 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2291 ctrl |= IXGB_CTRL0_VME;
2292 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
/* Unmask the interrupt causes the driver handles (RX timer, TX done,
 * RX threshold, link change, RX overrun). */
2299 ixgb_enable_intr(struct adapter * adapter)
2301 IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
2302 IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
/* Mask all interrupt causes. */
2307 ixgb_disable_intr(struct adapter * adapter)
2309 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
/* Shared-code hook: write a PCI config-space register for this device. */
2314 ixgb_write_pci_cfg(struct ixgb_hw * hw,
2318 pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
2322 /**********************************************************************
2324 * Update the board statistics counters.
2325 *
2325 * The hardware counters are clear-on-read, so each read is
2325 * accumulated into the software copies -- NOTE(review): clear-on-read
2325 * is inferred from the += pattern; confirm against the 82597EX
2325 * datasheet.  The ifnet counters are then refreshed from the totals.
2326 **********************************************************************/
2328 ixgb_update_stats_counters(struct adapter * adapter)
2332 adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
2333 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
2334 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
2335 adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
2336 adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
2337 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
2338 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
2339 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
2340 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
2341 adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
2343 adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
2344 adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
2345 adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
2346 adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
2347 adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
2348 adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
2349 adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
2350 adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
2351 adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
2352 adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
2353 adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
2354 adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
2355 adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
2356 adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
2357 adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
2358 adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
2359 adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
2360 adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
2361 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
2362 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
2363 adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
2364 adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
2365 adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
2366 adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
2367 adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
2368 adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
2369 adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
2371 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
2372 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
2373 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
2374 adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
2375 adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
2376 adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
2377 adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
2378 adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
2379 adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
2380 adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
2381 adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
2382 adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
2383 adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
2384 adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
2385 adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
2386 adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
2387 adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
2388 adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
2389 adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
2390 adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
2391 adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
2392 adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
2396 /* Fill out the OS statistics structure */
2397 ifp->if_ipackets = adapter->stats.gprcl;
2398 ifp->if_opackets = adapter->stats.gptcl;
2399 ifp->if_ibytes = adapter->stats.gorcl;
2400 ifp->if_obytes = adapter->stats.gotcl;
2401 ifp->if_imcasts = adapter->stats.mprcl;
/* Full-duplex-only fiber device: no collisions. */
2402 ifp->if_collisions = 0;
/* Input-error total: drops + CRC + no-buffer + missed + length errors. */
2406 adapter->dropped_pkts +
2407 adapter->stats.crcerrs +
2408 adapter->stats.rnbc +
2409 adapter->stats.mpc +
2410 adapter->stats.rlec;
2416 /**********************************************************************
2418 * This routine is called only when ixgb_display_debug_stats is enabled.
2419 * This routine provides a way to take a look at important statistics
2420 * maintained by the driver and hardware.
2422 **********************************************************************/
/*
 * ixgb_print_hw_stats - dump driver and hardware statistics to the console.
 *
 * Debug-only helper (see the header comment above: it runs only when
 * ixgb_display_debug_stats is enabled).  Prints the adapter's software
 * event counters, the saved interrupt-cause counters (sv_stats), the
 * decoded PCI bus speed/type, and the MAC statistics accumulated in
 * adapter->stats.  All output goes through device_printf(), which
 * prefixes each line with the device name.
 *
 * NOTE(review): "dev" is used below but its declaration is not visible
 * in this excerpt — presumably a device_t taken from the adapter;
 * confirm against the full source.
 */
2424 ixgb_print_hw_stats(struct adapter * adapter)
/* Scratch buffers for the human-readable PCI bus speed/type strings. */
2426 char buf_speed[100], buf_type[100];
2427 ixgb_bus_speed bus_speed;
2428 ixgb_bus_type bus_type;
/* Driver-maintained software counters; these are longs, hence %ld. */
2433 device_printf(dev, "Packets not Avail = %ld\n",
2434 adapter->no_pkts_avail);
2435 device_printf(dev, "CleanTxInterrupts = %ld\n",
2436 adapter->clean_tx_interrupts);
/*
 * Saved interrupt-cause register (ICR) event counters.  Each field is
 * cast to long long so the argument matches the %lld conversion.
 */
2437 device_printf(dev, "ICR RXDMT0 = %lld\n",
2438 (long long)adapter->sv_stats.icr_rxdmt0);
2439 device_printf(dev, "ICR RXO = %lld\n",
2440 (long long)adapter->sv_stats.icr_rxo);
2441 device_printf(dev, "ICR RXT0 = %lld\n",
2442 (long long)adapter->sv_stats.icr_rxt0);
2443 device_printf(dev, "ICR TXDW = %lld\n",
2444 (long long)adapter->sv_stats.icr_TXDW);
/*
 * Decode the PCI bus speed/type enums into printable strings.  The
 * ternary chains below feed sprintf() calls into buf_speed/buf_type
 * (the sprintf lines and the final "unknown" fallback operands are not
 * visible in this excerpt).
 */
2447 bus_speed = adapter->hw.bus.speed;
2448 bus_type = adapter->hw.bus.type;
2450 bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2451 bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2452 bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2453 bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2455 device_printf(dev, "PCI_Bus_Speed = %s\n",
2459 bus_type == ixgb_bus_type_pci ? "PCI" :
2460 bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2462 device_printf(dev, "PCI_Bus_Type = %s\n",
/* Transmit descriptor exhaustion and mbuf/cluster allocation failures. */
2465 device_printf(dev, "Tx Descriptors not Avail1 = %ld\n",
2466 adapter->no_tx_desc_avail1);
2467 device_printf(dev, "Tx Descriptors not Avail2 = %ld\n",
2468 adapter->no_tx_desc_avail2);
2469 device_printf(dev, "Std Mbuf Failed = %ld\n",
2470 adapter->mbuf_alloc_failed);
2471 device_printf(dev, "Std Cluster Failed = %ld\n",
2472 adapter->mbuf_cluster_failed);
/*
 * Hardware MAC statistics gathered in adapter->stats (accumulated from
 * the chip's counter registers by the stats-update routine earlier in
 * this file); cast to long long for %lld.
 */
2474 device_printf(dev, "Defer count = %lld\n",
2475 (long long)adapter->stats.dc);
2476 device_printf(dev, "Missed Packets = %lld\n",
2477 (long long)adapter->stats.mpc);
2478 device_printf(dev, "Receive No Buffers = %lld\n",
2479 (long long)adapter->stats.rnbc);
2480 device_printf(dev, "Receive length errors = %lld\n",
2481 (long long)adapter->stats.rlec);
2482 device_printf(dev, "Crc errors = %lld\n",
2483 (long long)adapter->stats.crcerrs);
2484 device_printf(dev, "Driver dropped packets = %ld\n",
2485 adapter->dropped_pkts);
/* Flow-control pause-frame counters (XON/XOFF received and sent). */
2487 device_printf(dev, "XON Rcvd = %lld\n",
2488 (long long)adapter->stats.xonrxc);
2489 device_printf(dev, "XON Xmtd = %lld\n",
2490 (long long)adapter->stats.xontxc);
2491 device_printf(dev, "XOFF Rcvd = %lld\n",
2492 (long long)adapter->stats.xoffrxc);
2493 device_printf(dev, "XOFF Xmtd = %lld\n",
2494 (long long)adapter->stats.xofftxc);
/* Good (error-free) packet counters, then jumbo-frame counters. */
2496 device_printf(dev, "Good Packets Rcvd = %lld\n",
2497 (long long)adapter->stats.gprcl);
2498 device_printf(dev, "Good Packets Xmtd = %lld\n",
2499 (long long)adapter->stats.gptcl);
2501 device_printf(dev, "Jumbo frames recvd = %lld\n",
2502 (long long)adapter->stats.jprcl);
2503 device_printf(dev, "Jumbo frames Xmtd = %lld\n",
2504 (long long)adapter->stats.jptcl);
2511 ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
2515 struct adapter *adapter;
2518 error = sysctl_handle_int(oidp, &result, 0, req);
2520 if (error || !req->newptr)
2524 adapter = (struct adapter *) arg1;
2525 ixgb_print_hw_stats(adapter);