1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
45 *********************************************************************/
/* Driver version string reported to the OS (sysctl / probe banner). */
46 char ixv_driver_version[] = "1.4.0";
48 /*********************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 *********************************************************************/
/*
 * PCI match table walked by ixv_probe().  Subvendor/subdevice of 0 act
 * as wildcards; the last field indexes ixv_strings.
 * NOTE(review): the all-zero sentinel entry and closing brace are elided
 * from this excerpt.
 */
58 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
64 /* required last entry */
68 /*********************************************************************
69 * Table of branding strings
70 *********************************************************************/
/* Branding strings referenced by index from ixv_vendor_info_array. */
72 static char *ixv_strings[] = {
73 "Intel(R) PRO/10GbE Virtual Function Network Driver"
76 /*********************************************************************
78 *********************************************************************/
/* Local prototypes: newbus entry points, ifnet callbacks, and helpers. */
79 static int ixv_probe(device_t);
80 static int ixv_attach(device_t);
81 static int ixv_detach(device_t);
82 static int ixv_shutdown(device_t);
83 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
84 static void ixv_init(void *);
85 static void ixv_init_locked(struct adapter *);
86 static void ixv_stop(void *);
87 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
88 static int ixv_media_change(struct ifnet *);
89 static void ixv_identify_hardware(struct adapter *);
90 static int ixv_allocate_pci_resources(struct adapter *);
91 static int ixv_allocate_msix(struct adapter *);
92 static int ixv_setup_msix(struct adapter *);
93 static void ixv_free_pci_resources(struct adapter *);
94 static void ixv_local_timer(void *);
95 static void ixv_setup_interface(device_t, struct adapter *);
96 static void ixv_config_link(struct adapter *);
98 static void ixv_initialize_transmit_units(struct adapter *);
99 static void ixv_initialize_receive_units(struct adapter *);
101 static void ixv_enable_intr(struct adapter *);
102 static void ixv_disable_intr(struct adapter *);
103 static void ixv_set_multi(struct adapter *);
104 static void ixv_update_link_status(struct adapter *);
105 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
107 static void ixv_configure_ivars(struct adapter *);
108 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware filter maintenance (event-handler driven). */
110 static void ixv_setup_vlan_support(struct adapter *);
111 static void ixv_register_vlan(void *, struct ifnet *, u16);
112 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
/* Statistics bookkeeping and sysctl export. */
114 static void ixv_save_stats(struct adapter *);
115 static void ixv_init_stats(struct adapter *);
116 static void ixv_update_stats(struct adapter *);
117 static void ixv_add_stats_sysctls(struct adapter *);
118 static void ixv_set_sysctl_value(struct adapter *, const char *,
119 const char *, int *, int);
121 /* The MSI/X Interrupt handlers */
122 static void ixv_msix_que(void *);
123 static void ixv_msix_mbx(void *);
125 /* Deferred interrupt tasklets */
126 static void ixv_handle_que(void *, int);
127 static void ixv_handle_mbx(void *, int);
/* NOTE(review): the opening #ifdef DEV_NETMAP is elided in this excerpt;
 * this section is only compiled with netmap support enabled. */
131 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
134 extern void ixgbe_netmap_attach(struct adapter *adapter);
136 #include <net/netmap.h>
137 #include <sys/selinfo.h>
138 #include <dev/netmap/netmap_kern.h>
139 #endif /* DEV_NETMAP */
141 /*********************************************************************
142 * FreeBSD Device Interface Entry Points
143 *********************************************************************/
/* Newbus method table: maps generic device operations onto this driver. */
145 static device_method_t ixv_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, ixv_probe),
148 DEVMETHOD(device_attach, ixv_attach),
149 DEVMETHOD(device_detach, ixv_detach),
150 DEVMETHOD(device_shutdown, ixv_shutdown),
/* Driver descriptor: softc size tells newbus how much per-device
 * state to allocate (struct adapter). */
154 static driver_t ixv_driver = {
155 "ixv", ixv_methods, sizeof(struct adapter),
/* Newbus/module glue: register "ixv" on the pci bus and declare its
 * module dependencies. */
158 devclass_t ixv_devclass;
159 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
160 MODULE_DEPEND(ixv, pci, 1, 1, 1);
161 MODULE_DEPEND(ixv, ether, 1, 1, 1);
/* This module is "ixv", so the netmap dependency must be declared for
 * "ixv", not for the separate PF driver module "ix". */
163 MODULE_DEPEND(ixv, netmap, 1, 1, 1);
164 #endif /* DEV_NETMAP */
168 ** TUNEABLE PARAMETERS:
171 /* Number of Queues - do not exceed MSIX vectors - 1 */
172 static int ixv_num_queues = 1;
173 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
176 ** AIM: Adaptive Interrupt Moderation
177 ** which means that the interrupt rate
178 ** is varied over time based on the
179 ** traffic for that interrupt vector
181 static int ixv_enable_aim = FALSE;
182 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
184 /* How many packets rxeof tries to clean at a time */
185 static int ixv_rx_process_limit = 256;
186 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
188 /* How many packets txeof tries to clean at a time */
189 static int ixv_tx_process_limit = 256;
190 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
192 /* Flow control setting, default to full */
193 static int ixv_flow_control = ixgbe_fc_full;
194 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
197 * Header split: this causes the hardware to DMA
198 * the header into a separate mbuf from the payload,
199 * it can be a performance win in some workloads, but
200 * in others it actually hurts, its off by default.
202 static int ixv_header_split = FALSE;
203 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
206 ** Number of TX descriptors per ring,
207 ** setting higher than RX as this seems
208 ** the better performing choice.
210 static int ixv_txd = DEFAULT_TXD;
211 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
213 /* Number of RX descriptors per ring */
214 static int ixv_rxd = DEFAULT_RXD;
215 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
218 ** Shadow VFTA table, this is needed because
219 ** the real filter table gets cleared during
220 ** a soft reset and we need to repopulate it.
222 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
224 /*********************************************************************
225 * Device identification routine
227 * ixv_probe determines if the driver should be loaded on
228 * adapter based on PCI vendor/device id of the adapter.
230 * return BUS_PROBE_DEFAULT on success, positive on failure
231 *********************************************************************/
234 ixv_probe(device_t dev)
236 ixgbe_vendor_info_t *ent;
238 u16 pci_vendor_id = 0;
239 u16 pci_device_id = 0;
240 u16 pci_subvendor_id = 0;
241 u16 pci_subdevice_id = 0;
242 char adapter_name[256];
/* Cheap early reject: bail out before reading the rest of the config
 * space if the vendor is not Intel. */
245 pci_vendor_id = pci_get_vendor(dev);
246 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
249 pci_device_id = pci_get_device(dev);
250 pci_subvendor_id = pci_get_subvendor(dev);
251 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; subvendor/subdevice of 0 are wildcards. */
253 ent = ixv_vendor_info_array;
254 while (ent->vendor_id != 0) {
255 if ((pci_vendor_id == ent->vendor_id) &&
256 (pci_device_id == ent->device_id) &&
258 ((pci_subvendor_id == ent->subvendor_id) ||
259 (ent->subvendor_id == 0)) &&
261 ((pci_subdevice_id == ent->subdevice_id) ||
262 (ent->subdevice_id == 0))) {
263 sprintf(adapter_name, "%s, Version - %s",
264 ixv_strings[ent->index],
266 device_set_desc_copy(dev, adapter_name);
267 return (BUS_PROBE_DEFAULT);
274 /*********************************************************************
275 * Device initialization routine
277 * The attach entry point is called when the driver is being loaded.
278 * This routine identifies the type of hardware, allocates all resources
279 * and initializes the hardware.
281 * return 0 on success, positive on failure
282 *********************************************************************/
285 ixv_attach(device_t dev)
287 struct adapter *adapter;
291 INIT_DEBUGOUT("ixv_attach: begin");
293 /* Allocate, clear, and link in our adapter structure */
294 adapter = device_get_softc(dev);
295 adapter->dev = adapter->osdep.dev = dev;
299 adapter->init_locked = ixv_init_locked;
300 adapter->stop_locked = ixv_stop;
304 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device debug sysctl (dev.ixv.N.debug). */
307 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
309 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
310 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
312 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
314 OID_AUTO, "enable_aim", CTLFLAG_RW,
315 &ixv_enable_aim, 1, "Interrupt Moderation");
317 /* Set up the timer callout */
318 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
320 /* Determine hardware revision */
321 ixv_identify_hardware(adapter);
323 /* Do base PCI setup - map BAR0 */
324 if (ixv_allocate_pci_resources(adapter)) {
325 device_printf(dev, "Allocation of PCI resources failed\n");
330 /* Sysctls for limiting the amount of work done in the taskqueues */
/* (This block previously appeared twice; the verbatim duplicate was
 * removed so the same sysctl OIDs are not registered a second time.) */
331 ixv_set_sysctl_value(adapter, "rx_processing_limit",
332 "max number of rx packets to process",
333 &adapter->rx_process_limit, ixv_rx_process_limit);
335 ixv_set_sysctl_value(adapter, "tx_processing_limit",
336 "max number of tx packets to process",
337 &adapter->tx_process_limit, ixv_tx_process_limit);
348 /* Do descriptor calc and sanity checks */
349 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
350 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
351 device_printf(dev, "TXD config issue, using default!\n");
352 adapter->num_tx_desc = DEFAULT_TXD;
354 adapter->num_tx_desc = ixv_txd;
356 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
357 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
358 device_printf(dev, "RXD config issue, using default!\n");
359 adapter->num_rx_desc = DEFAULT_RXD;
361 adapter->num_rx_desc = ixv_rxd;
363 /* Allocate our TX/RX Queues */
364 if (ixgbe_allocate_queues(adapter)) {
370 ** Initialize the shared code: at
371 ** this point the mac type is set.
373 error = ixgbe_init_shared_code(hw);
375 device_printf(dev,"Shared Code Initialization Failure\n");
380 /* Setup the mailbox */
381 ixgbe_init_mbx_params_vf(hw);
385 /* Get the Mailbox API version */
386 device_printf(dev,"MBX API %d negotiation: %d\n",
388 ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));
390 error = ixgbe_init_hw(hw);
392 device_printf(dev,"Hardware Initialization Failure\n");
397 error = ixv_allocate_msix(adapter);
401 /* If no mac address was assigned, make a random one */
402 if (!ixv_check_ether_addr(hw->mac.addr)) {
403 u8 addr[ETHER_ADDR_LEN];
404 arc4rand(&addr, sizeof(addr), 0);
407 bcopy(addr, hw->mac.addr, sizeof(addr));
410 /* Setup OS specific network interface */
411 ixv_setup_interface(dev, adapter);
413 /* Do the stats setup */
414 ixv_save_stats(adapter);
415 ixv_init_stats(adapter);
416 ixv_add_stats_sysctls(adapter);
418 /* Register for VLAN events */
419 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
420 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
421 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
422 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
425 ixgbe_netmap_attach(adapter);
426 #endif /* DEV_NETMAP */
427 INIT_DEBUGOUT("ixv_attach: end");
/* Error unwind: release queue structures and PCI resources. */
431 ixgbe_free_transmit_structures(adapter);
432 ixgbe_free_receive_structures(adapter);
434 ixv_free_pci_resources(adapter);
439 /*********************************************************************
440 * Device removal routine
442 * The detach entry point is called when the driver is being removed.
443 * This routine stops the adapter and deallocates all the resources
444 * that were allocated for driver operation.
446 * return 0 on success, positive on failure
447 *********************************************************************/
450 ixv_detach(device_t dev)
452 struct adapter *adapter = device_get_softc(dev);
453 struct ix_queue *que = adapter->queues;
455 INIT_DEBUGOUT("ixv_detach: begin");
457 /* Make sure VLANS are not using driver */
458 if (adapter->ifp->if_vlantrunk != NULL) {
459 device_printf(dev,"Vlan in use, detach first\n");
463 IXGBE_CORE_LOCK(adapter);
465 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues before tearing down the
 * rings they reference. */
467 for (int i = 0; i < adapter->num_queues; i++, que++) {
469 struct tx_ring *txr = que->txr;
470 taskqueue_drain(que->tq, &txr->txq_task);
471 taskqueue_drain(que->tq, &que->que_task);
472 taskqueue_free(que->tq);
476 /* Drain the Mailbox(link) queue */
478 taskqueue_drain(adapter->tq, &adapter->link_task);
479 taskqueue_free(adapter->tq);
482 /* Unregister VLAN events */
483 if (adapter->vlan_attach != NULL)
484 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
485 if (adapter->vlan_detach != NULL)
486 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
488 ether_ifdetach(adapter->ifp);
489 callout_drain(&adapter->timer);
491 netmap_detach(adapter->ifp);
492 #endif /* DEV_NETMAP */
493 ixv_free_pci_resources(adapter);
494 bus_generic_detach(dev);
495 if_free(adapter->ifp);
497 ixgbe_free_transmit_structures(adapter);
498 ixgbe_free_receive_structures(adapter);
500 IXGBE_CORE_LOCK_DESTROY(adapter);
504 /*********************************************************************
506 * Shutdown entry point
508 **********************************************************************/
510 ixv_shutdown(device_t dev)
512 struct adapter *adapter = device_get_softc(dev);
/* Quiesce the adapter under the core lock; the stop call itself is
 * elided from this excerpt. */
513 IXGBE_CORE_LOCK(adapter);
515 IXGBE_CORE_UNLOCK(adapter);
520 /*********************************************************************
523 * ixv_ioctl is called when the user wants to configure the
526 * return 0 on success, positive on failure
527 **********************************************************************/
/* NOTE(review): the switch statement and its case labels are elided in
 * this excerpt; the bodies below correspond to SIOCSIFADDR, SIOCSIFMTU,
 * SIOCSIFFLAGS, SIOC(ADD|DEL)MULTI, SIOCxIFMEDIA, SIOCSIFCAP and the
 * default case, judging by the IOCTL_DEBUGOUT strings. */
530 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
532 struct adapter *adapter = ifp->if_softc;
533 struct ifreq *ifr = (struct ifreq *) data;
534 #if defined(INET) || defined(INET6)
535 struct ifaddr *ifa = (struct ifaddr *) data;
536 bool avoid_reset = FALSE;
544 if (ifa->ifa_addr->sa_family == AF_INET)
548 if (ifa->ifa_addr->sa_family == AF_INET6)
551 #if defined(INET) || defined(INET6)
553 ** Calling init results in link renegotiation,
554 ** so we avoid doing it when possible.
557 ifp->if_flags |= IFF_UP;
558 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
560 if (!(ifp->if_flags & IFF_NOARP))
561 arp_ifinit(ifp, ifa);
563 error = ether_ioctl(ifp, command, data);
567 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
568 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
571 IXGBE_CORE_LOCK(adapter);
572 ifp->if_mtu = ifr->ifr_mtu;
573 adapter->max_frame_size =
574 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
575 ixv_init_locked(adapter);
576 IXGBE_CORE_UNLOCK(adapter);
580 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
581 IXGBE_CORE_LOCK(adapter);
582 if (ifp->if_flags & IFF_UP) {
583 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
584 ixv_init_locked(adapter);
586 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
588 adapter->if_flags = ifp->if_flags;
589 IXGBE_CORE_UNLOCK(adapter);
/* Multicast list changed: reprogram the filter with interrupts
 * masked so the update is not raced by the ISR. */
593 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
594 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
595 IXGBE_CORE_LOCK(adapter);
596 ixv_disable_intr(adapter);
597 ixv_set_multi(adapter);
598 ixv_enable_intr(adapter);
599 IXGBE_CORE_UNLOCK(adapter);
604 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
605 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* Toggle only the capability bits that changed, then re-init if
 * the interface is running so the hardware picks them up. */
609 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
610 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
611 if (mask & IFCAP_HWCSUM)
612 ifp->if_capenable ^= IFCAP_HWCSUM;
613 if (mask & IFCAP_TSO4)
614 ifp->if_capenable ^= IFCAP_TSO4;
615 if (mask & IFCAP_LRO)
616 ifp->if_capenable ^= IFCAP_LRO;
617 if (mask & IFCAP_VLAN_HWTAGGING)
618 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
619 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
620 IXGBE_CORE_LOCK(adapter);
621 ixv_init_locked(adapter);
622 IXGBE_CORE_UNLOCK(adapter);
624 VLAN_CAPABILITIES(ifp);
629 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
630 error = ether_ioctl(ifp, command, data);
637 /*********************************************************************
640 * This routine is used in two ways. It is used by the stack as
641 * init entry point in network interface structure. It is also used
642 * by the driver as a hw/sw initialization routine to get to a
645 * return 0 on success, positive on failure
646 **********************************************************************/
647 #define IXGBE_MHADD_MFS_SHIFT 16
/* Core (re)initialization; caller must hold the core mutex. */
650 ixv_init_locked(struct adapter *adapter)
652 struct ifnet *ifp = adapter->ifp;
653 device_t dev = adapter->dev;
654 struct ixgbe_hw *hw = &adapter->hw;
657 INIT_DEBUGOUT("ixv_init: begin");
658 mtx_assert(&adapter->core_mtx, MA_OWNED);
/* Clear the stopped flag first so ixgbe_stop_adapter() actually
 * performs the stop sequence. */
659 hw->adapter_stopped = FALSE;
660 ixgbe_stop_adapter(hw);
661 callout_stop(&adapter->timer);
663 /* reprogram the RAR[0] in case user changed it. */
664 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
666 /* Get the latest mac address, User can use a LAA */
667 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
668 IXGBE_ETH_LENGTH_OF_ADDRESS);
669 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
670 hw->addr_ctrl.rar_used_count = 1;
672 /* Prepare transmit descriptors and buffers */
673 if (ixgbe_setup_transmit_structures(adapter)) {
674 device_printf(dev,"Could not setup transmit structures\n");
680 ixv_initialize_transmit_units(adapter);
682 /* Setup Multicast table */
683 ixv_set_multi(adapter);
686 ** Determine the correct mbuf pool
687 ** for doing jumbo/headersplit
689 if (ifp->if_mtu > ETHERMTU)
690 adapter->rx_mbuf_sz = MJUMPAGESIZE;
692 adapter->rx_mbuf_sz = MCLBYTES;
694 /* Prepare receive descriptors and buffers */
695 if (ixgbe_setup_receive_structures(adapter)) {
696 device_printf(dev,"Could not setup receive structures\n");
701 /* Configure RX settings */
702 ixv_initialize_receive_units(adapter);
704 /* Enable Enhanced MSIX mode */
705 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
706 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
707 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
708 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
710 /* Set the various hardware offload abilities */
711 ifp->if_hwassist = 0;
712 if (ifp->if_capenable & IFCAP_TSO4)
713 ifp->if_hwassist |= CSUM_TSO;
714 if (ifp->if_capenable & IFCAP_TXCSUM) {
715 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
716 #if __FreeBSD_version >= 800000
717 ifp->if_hwassist |= CSUM_SCTP;
/* Program the max frame size register when using jumbo frames. */
722 if (ifp->if_mtu > ETHERMTU) {
723 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
724 mhadd &= ~IXGBE_MHADD_MFS_MASK;
725 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
726 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
729 /* Set up VLAN offload and filter */
730 ixv_setup_vlan_support(adapter);
732 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
734 /* Set up MSI/X routing */
735 ixv_configure_ivars(adapter);
737 /* Set up auto-mask */
738 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
740 /* Set moderation on the Link interrupt */
741 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
744 ixv_init_stats(adapter);
746 /* Config/Enable Link */
747 ixv_config_link(adapter);
749 /* And now turn on interrupts */
750 ixv_enable_intr(adapter);
752 /* Now inform the stack we're ready */
753 ifp->if_drv_flags |= IFF_DRV_RUNNING;
754 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* ixv_init: if_init entry point — takes the core lock and delegates
 * to ixv_init_locked() (signature line elided in this excerpt). */
762 struct adapter *adapter = arg;
764 IXGBE_CORE_LOCK(adapter);
765 ixv_init_locked(adapter);
766 IXGBE_CORE_UNLOCK(adapter);
773 ** MSIX Interrupt Handlers and Tasklets
/* Unmask the MSI-X interrupt for one queue via VTEIMS.
 * NOTE(review): `1 << vector` is a 32-bit shift — fine for the small
 * VF vector counts used here, but would overflow for vector >= 31. */
778 ixv_enable_queue(struct adapter *adapter, u32 vector)
780 struct ixgbe_hw *hw = &adapter->hw;
781 u32 queue = 1 << vector;
784 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
785 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Mask the MSI-X interrupt for one queue via VTEIMC.
 * NOTE(review): the shift `(u64)(1 << vector)` is evaluated in 32-bit
 * int and then widened — harmless for small VF vector counts, but not
 * a true 64-bit shift; confirm if vector can ever reach 31+. */
789 ixv_disable_queue(struct adapter *adapter, u32 vector)
791 struct ixgbe_hw *hw = &adapter->hw;
792 u64 queue = (u64)(1 << vector);
795 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
796 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
/* Trigger a software interrupt (VTEICS) on the given queue bitmap so
 * their handlers run again. */
800 ixv_rearm_queues(struct adapter *adapter, u64 queues)
802 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
803 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
/* Deferred (taskqueue) per-queue handler: cleans RX, restarts TX, and
 * re-queues itself while work remains; re-enables the queue interrupt
 * when done. */
808 ixv_handle_que(void *context, int pending)
810 struct ix_queue *que = context;
811 struct adapter *adapter = que->adapter;
812 struct tx_ring *txr = que->txr;
813 struct ifnet *ifp = adapter->ifp;
816 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
817 more = ixgbe_rxeof(que);
820 #if __FreeBSD_version >= 800000
821 if (!drbr_empty(ifp, txr->br))
822 ixgbe_mq_start_locked(ifp, txr);
824 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
825 ixgbe_start_locked(txr, ifp);
827 IXGBE_TX_UNLOCK(txr);
/* Still more RX work: reschedule ourselves instead of re-enabling
 * the interrupt. */
829 taskqueue_enqueue(que->tq, &que->que_task);
834 /* Reenable this interrupt */
835 ixv_enable_queue(adapter, que->msix);
839 /*********************************************************************
841 * MSI Queue Interrupt Service routine
843 **********************************************************************/
845 ixv_msix_que(void *arg)
847 struct ix_queue *que = arg;
848 struct adapter *adapter = que->adapter;
849 struct ifnet *ifp = adapter->ifp;
850 struct tx_ring *txr = que->txr;
851 struct rx_ring *rxr = que->rxr;
/* Mask this vector while we service it; it is re-enabled at the end
 * (or by the deferred task). */
855 ixv_disable_queue(adapter, que->msix);
858 more = ixgbe_rxeof(que);
863 ** Make certain that if the stack
864 ** has anything queued the task gets
865 ** scheduled to handle it.
867 #ifdef IXGBE_LEGACY_TX
868 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
869 ixgbe_start_locked(txr, ifp);
871 if (!drbr_empty(adapter->ifp, txr->br))
872 ixgbe_mq_start_locked(ifp, txr);
874 IXGBE_TX_UNLOCK(txr);
/* Skip the AIM computation entirely when moderation is disabled. */
878 if (ixv_enable_aim == FALSE)
881 ** Do Adaptive Interrupt Moderation:
882 ** - Write out last calculated setting
883 ** - Calculate based on average size over
884 ** the last interval.
886 if (que->eitr_setting)
887 IXGBE_WRITE_REG(&adapter->hw,
888 IXGBE_VTEITR(que->msix),
891 que->eitr_setting = 0;
893 /* Idle, do nothing */
894 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* Estimate average packet size to derive the next interrupt rate. */
897 if ((txr->bytes) && (txr->packets))
898 newitr = txr->bytes/txr->packets;
899 if ((rxr->bytes) && (rxr->packets))
901 (rxr->bytes / rxr->packets));
902 newitr += 24; /* account for hardware frame, crc */
904 /* set an upper boundary */
905 newitr = min(newitr, 3000);
907 /* Be nice to the mid range */
908 if ((newitr > 300) && (newitr < 1200))
909 newitr = (newitr / 3);
911 newitr = (newitr / 2);
913 newitr |= newitr << 16;
915 /* save for next interrupt */
916 que->eitr_setting = newitr;
/* More work pending: defer to the taskqueue; otherwise unmask. */
926 taskqueue_enqueue(que->tq, &que->que_task);
927 else /* Reenable this interrupt */
928 ixv_enable_queue(adapter, que->msix);
/* Mailbox/link MSI-X handler: reads and acks the cause, defers link
 * processing to the link task, then re-enables the OTHER interrupt. */
933 ixv_msix_mbx(void *arg)
935 struct adapter *adapter = arg;
936 struct ixgbe_hw *hw = &adapter->hw;
941 /* First get the cause */
942 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
943 /* Clear interrupt with write */
944 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
946 /* Link status change */
947 if (reg & IXGBE_EICR_LSC)
948 taskqueue_enqueue(adapter->tq, &adapter->link_task);
950 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
954 /*********************************************************************
956 * Media Ioctl callback
958 * This routine is called whenever the user queries the status of
959 * the interface using ifconfig.
961 **********************************************************************/
963 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
965 struct adapter *adapter = ifp->if_softc;
967 INIT_DEBUGOUT("ixv_media_status: begin");
968 IXGBE_CORE_LOCK(adapter);
969 ixv_update_link_status(adapter);
971 ifmr->ifm_status = IFM_AVALID;
972 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and return early. */
974 if (!adapter->link_active) {
975 IXGBE_CORE_UNLOCK(adapter);
979 ifmr->ifm_status |= IFM_ACTIVE;
981 switch (adapter->link_speed) {
982 case IXGBE_LINK_SPEED_1GB_FULL:
983 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
985 case IXGBE_LINK_SPEED_10GB_FULL:
986 ifmr->ifm_active |= IFM_FDX;
990 IXGBE_CORE_UNLOCK(adapter);
995 /*********************************************************************
997 * Media Ioctl callback
999 * This routine is called when the user changes speed/duplex using
1000 * media/mediaopt option with ifconfig.
1002 **********************************************************************/
1004 ixv_media_change(struct ifnet * ifp)
1006 struct adapter *adapter = ifp->if_softc;
1007 struct ifmedia *ifm = &adapter->media;
1009 INIT_DEBUGOUT("ixv_media_change: begin");
1011 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* The VF cannot change speed/duplex; only IFM_AUTO is accepted. */
1014 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1018 device_printf(adapter->dev, "Only auto media type\n");
1026 /*********************************************************************
1029 * This routine is called whenever multicast address list is updated.
1031 **********************************************************************/
1032 #define IXGBE_RAR_ENTRIES 16
1035 ixv_set_multi(struct adapter *adapter)
1037 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1039 struct ifmultiaddr *ifma;
1041 struct ifnet *ifp = adapter->ifp;
1043 IOCTL_DEBUGOUT("ixv_set_multi: begin");
/* Snapshot the interface's multicast list into a flat array under the
 * appropriate address-list lock (API differs by FreeBSD version). */
1045 #if __FreeBSD_version < 800000
1048 if_maddr_rlock(ifp);
1050 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1051 if (ifma->ifma_addr->sa_family != AF_LINK)
1053 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1054 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1055 IXGBE_ETH_LENGTH_OF_ADDRESS);
1058 #if __FreeBSD_version < 800000
1059 IF_ADDR_UNLOCK(ifp);
1061 if_maddr_runlock(ifp);
/* Hand the array to shared code, which pulls addresses back out one
 * at a time through the ixv_mc_array_itr callback. */
1066 ixgbe_update_mc_addr_list(&adapter->hw,
1067 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1073 * This is an iterator function now needed by the multicast
1074 * shared code. It simply feeds the shared code routine the
1075 * addresses in the array of ixv_set_multi() one by one.
1078 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1080 u8 *addr = *update_ptr;
/* Advance the cursor one MAC address (6 bytes) for the next call. */
1084 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1085 *update_ptr = newptr;
1089 /*********************************************************************
1092 * This routine checks for link status,updates statistics,
1093 * and runs the watchdog check.
1095 **********************************************************************/
1098 ixv_local_timer(void *arg)
1100 struct adapter *adapter = arg;
1101 device_t dev = adapter->dev;
1102 struct ix_queue *que = adapter->queues;
1106 mtx_assert(&adapter->core_mtx, MA_OWNED);
1108 ixv_update_link_status(adapter);
1111 ixv_update_stats(adapter);
1114 ** Check the TX queues status
1115 ** - mark hung queues so we don't schedule on them
1116 ** - watchdog only if all queues show hung
1118 for (int i = 0; i < adapter->num_queues; i++, que++) {
1119 /* Keep track of queues with work for soft irq */
1121 queues |= ((u64)1 << que->me);
1123 ** Each time txeof runs without cleaning, but there
1124 ** are uncleaned descriptors it increments busy. If
1125 ** we get to the MAX we declare it hung.
1127 if (que->busy == IXGBE_QUEUE_HUNG) {
1129 /* Mark the queue as inactive */
1130 adapter->active_queues &= ~((u64)1 << que->me);
1133 /* Check if we've come back from hung */
1134 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1135 adapter->active_queues |= ((u64)1 << que->me);
1137 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1138 device_printf(dev,"Warning queue %d "
1139 "appears to be hung!\n", i);
1140 que->txr->busy = IXGBE_QUEUE_HUNG;
1146 /* Only truly watchdog if all queues show hung */
1147 if (hung == adapter->num_queues)
1149 else if (queues != 0) { /* Force an IRQ on queues with work */
1150 ixv_rearm_queues(adapter, queues);
1153 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog path: all queues hung — mark the interface down, count the
 * event, and reinitialize the adapter. */
1157 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1158 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1159 adapter->watchdog_events++;
1160 ixv_init_locked(adapter);
1164 ** Note: this routine updates the OS on the link state
1165 ** the real check of the hardware only happens with
1166 ** a link interrupt.
1169 ixv_update_link_status(struct adapter *adapter)
1171 struct ifnet *ifp = adapter->ifp;
1172 device_t dev = adapter->dev;
/* Only notify the stack on a state transition (edge-triggered). */
1174 if (adapter->link_up){
1175 if (adapter->link_active == FALSE) {
1177 device_printf(dev,"Link is up %d Gbps %s \n",
1178 ((adapter->link_speed == 128)? 10:1),
1180 adapter->link_active = TRUE;
1181 if_link_state_change(ifp, LINK_STATE_UP);
1183 } else { /* Link down */
1184 if (adapter->link_active == TRUE) {
1186 device_printf(dev,"Link is Down\n");
1187 if_link_state_change(ifp, LINK_STATE_DOWN);
1188 adapter->link_active = FALSE;
1196 /*********************************************************************
1198 * This routine disables all traffic on the adapter by issuing a
1199 * global reset on the MAC and deallocates TX/RX buffers.
1201 **********************************************************************/
1207 struct adapter *adapter = arg;
1208 struct ixgbe_hw *hw = &adapter->hw;
1211 mtx_assert(&adapter->core_mtx, MA_OWNED);
1213 INIT_DEBUGOUT("ixv_stop: begin\n");
1214 ixv_disable_intr(adapter);
1216 /* Tell the stack that the interface is no longer active */
1217 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Clear the stopped flag first so ixgbe_stop_adapter() performs the
 * full stop sequence rather than short-circuiting. */
1220 adapter->hw.adapter_stopped = FALSE;
1221 ixgbe_stop_adapter(hw);
1222 callout_stop(&adapter->timer);
1224 /* reprogram the RAR[0] in case user changed it. */
1225 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1231 /*********************************************************************
1233 * Determine hardware revision.
1235 **********************************************************************/
1237 ixv_identify_hardware(struct adapter *adapter)
1239 device_t dev = adapter->dev;
1240 struct ixgbe_hw *hw = &adapter->hw;
1243 ** Make sure BUSMASTER is set, on a VM under
1244 ** KVM it may not be and will break things.
1246 pci_enable_busmaster(dev);
1248 /* Save off the information about this board */
1249 hw->vendor_id = pci_get_vendor(dev);
1250 hw->device_id = pci_get_device(dev);
1251 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1252 hw->subsystem_vendor_id =
1253 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1254 hw->subsystem_device_id =
1255 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1257 /* We need this to determine device-specific things */
1258 ixgbe_set_mac_type(hw);
1260 /* Set the right number of segments */
1261 adapter->num_segs = IXGBE_82599_SCATTER;
/*
 * ixv_allocate_msix: allocate and wire up one MSIX vector per queue
 * plus one extra vector for the PF mailbox, including per-vector
 * taskqueues for deferred processing.
 * NOTE(review): several lines (rid initialization, error returns,
 * #else/#endif arms) are elided in this excerpt -- tokens below are
 * preserved verbatim.
 */
1266 /*********************************************************************
1268 * Setup MSIX Interrupt resources and handlers
1270 **********************************************************************/
1272 ixv_allocate_msix(struct adapter *adapter)
1274 device_t dev = adapter->dev;
1275 struct ix_queue *que = adapter->queues;
1276 struct tx_ring *txr = adapter->tx_rings;
1277 int error, rid, vector = 0;
1279 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1281 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1282 RF_SHAREABLE | RF_ACTIVE);
1283 if (que->res == NULL) {
1284 device_printf(dev,"Unable to allocate"
1285 " bus resource: que interrupt [%d]\n", vector);
1288 /* Set the handler function */
1289 error = bus_setup_intr(dev, que->res,
1290 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1291 ixv_msix_que, que, &que->tag);
1294 device_printf(dev, "Failed to register QUE handler");
1297 #if __FreeBSD_version >= 800504
1298 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* Record this vector in the active-queue bitmap */
1301 adapter->active_queues |= (u64)(1 << que->msix);
1303 ** Bind the msix vector, and thus the
1304 ** ring to the corresponding cpu.
1306 if (adapter->num_queues > 1)
1307 bus_bind_intr(dev, que->res, i);
1308 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1309 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1310 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1311 taskqueue_thread_enqueue, &que->tq);
1312 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1313 device_get_nameunit(adapter->dev));
/* Now the separate vector for the PF<->VF mailbox / link events */
1318 adapter->res = bus_alloc_resource_any(dev,
1319 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1320 if (!adapter->res) {
1321 device_printf(dev,"Unable to allocate"
1322 " bus resource: MBX interrupt [%d]\n", rid);
1325 /* Set the mbx handler function */
1326 error = bus_setup_intr(dev, adapter->res,
1327 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1328 ixv_msix_mbx, adapter, &adapter->tag);
1330 adapter->res = NULL;
1331 device_printf(dev, "Failed to register LINK handler");
1334 #if __FreeBSD_version >= 800504
1335 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1337 adapter->vector = vector;
1338 /* Tasklets for Mailbox */
1339 TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1340 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1341 taskqueue_thread_enqueue, &adapter->tq);
1342 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1343 device_get_nameunit(adapter->dev));
1345 ** Due to a broken design QEMU will fail to properly
1346 ** enable the guest for MSIX unless the vectors in
1347 ** the table are all set up, so we must rewrite the
1348 ** ENABLE in the MSIX control register again at this
1349 ** point to cause it to successfully initialize us.
1351 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1353 pci_find_cap(dev, PCIY_MSIX, &rid);
1354 rid += PCIR_MSIX_CTRL;
1355 msix_ctrl = pci_read_config(dev, rid, 2);
1356 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1357 pci_write_config(dev, rid, msix_ctrl, 2);
/*
 * ixv_setup_msix: map the MSIX table BAR and allocate num_queues + 1
 * vectors (queues plus mailbox).  The VF has no legacy/MSI fallback;
 * on failure all resources are released and an error path is taken.
 */
1364 * Setup MSIX resources, note that the VF
1365 * device MUST use MSIX, there is no fallback.
1368 ixv_setup_msix(struct adapter *adapter)
1370 device_t dev = adapter->dev;
1371 int rid, want, msgs;
1374 /* Must have at least 2 MSIX vectors */
1375 msgs = pci_msix_count(dev);
1379 adapter->msix_mem = bus_alloc_resource_any(dev,
1380 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1381 if (adapter->msix_mem == NULL) {
1382 device_printf(adapter->dev,
1383 "Unable to map MSIX table \n");
1388 ** Want vectors for the queues,
1389 ** plus an additional for mailbox.
1391 want = adapter->num_queues + 1;
/* Not enough vectors for the request: shrink the queue count to fit */
1394 adapter->num_queues = msgs - 1;
1397 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
1398 device_printf(adapter->dev,
1399 "Using MSIX interrupts with %d vectors\n", want);
1402 /* Release in case alloc was insufficient */
1403 pci_release_msi(dev);
1405 if (adapter->msix_mem != NULL) {
1406 bus_release_resource(dev, SYS_RES_MEMORY,
1407 rid, adapter->msix_mem);
1408 adapter->msix_mem = NULL;
1410 device_printf(adapter->dev,"MSIX config error\n");
/*
 * ixv_allocate_pci_resources: map the device's register BAR, record the
 * bus-space tag/handle for register access, and then set up MSIX.
 */
1416 ixv_allocate_pci_resources(struct adapter *adapter)
1419 device_t dev = adapter->dev;
1422 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1425 if (!(adapter->pci_mem)) {
1426 device_printf(dev,"Unable to allocate bus resource: memory\n");
1430 adapter->osdep.mem_bus_space_tag =
1431 rman_get_bustag(adapter->pci_mem);
1432 adapter->osdep.mem_bus_space_handle =
1433 rman_get_bushandle(adapter->pci_mem);
/* Shared code dereferences hw_addr via the osdep bus-space handle */
1434 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1436 /* Pick up the tuneable queues */
1437 adapter->num_queues = ixv_num_queues;
1439 adapter->hw.back = &adapter->osdep;
1442 ** Now setup MSI/X, should
1443 ** return us the number of
1444 ** configured vectors.
1446 adapter->msix = ixv_setup_msix(adapter);
1447 if (adapter->msix == ENXIO)
/*
 * ixv_free_pci_resources: tear down interrupt handlers and release all
 * IRQ/memory bus resources in reverse order of allocation.  Safe to call
 * from a partially-failed attach: bails out early if interrupt resources
 * were never set up (adapter->res == NULL).
 */
1454 ixv_free_pci_resources(struct adapter * adapter)
1456 struct ix_queue *que = adapter->queues;
1457 device_t dev = adapter->dev;
/* The MSIX table lives in the BAR used by the 82598-family layout */
1460 memrid = PCIR_BAR(MSIX_82598_BAR);
1463 ** There is a slight possibility of a failure mode
1464 ** in attach that will result in entering this function
1465 ** before interrupt resources have been initialized, and
1466 ** in that case we do not want to execute the loops below
1467 ** We can detect this reliably by the state of the adapter
1470 if (adapter->res == NULL)
1474 ** Release all msix queue resources:
1476 for (int i = 0; i < adapter->num_queues; i++, que++) {
1477 rid = que->msix + 1;
1478 if (que->tag != NULL) {
1479 bus_teardown_intr(dev, que->res, que->tag);
1482 if (que->res != NULL)
1483 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1487 /* Clean the Legacy or Link interrupt last */
1488 if (adapter->vector) /* we are doing MSIX */
1489 rid = adapter->vector + 1;
1491 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1493 if (adapter->tag != NULL) {
1494 bus_teardown_intr(dev, adapter->res, adapter->tag);
1495 adapter->tag = NULL;
1497 if (adapter->res != NULL)
1498 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1502 pci_release_msi(dev);
1504 if (adapter->msix_mem != NULL)
1505 bus_release_resource(dev, SYS_RES_MEMORY,
1506 memrid, adapter->msix_mem);
1508 if (adapter->pci_mem != NULL)
1509 bus_release_resource(dev, SYS_RES_MEMORY,
1510 PCIR_BAR(0), adapter->pci_mem);
/*
 * ixv_setup_interface: allocate and initialize the ifnet, attach the
 * ethernet layer, advertise capabilities (checksum offload, TSO4,
 * VLAN tagging, jumbo, LRO) and register the ifmedia callbacks.
 */
1515 /*********************************************************************
1517 * Setup networking device structure and register an interface.
1519 **********************************************************************/
1521 ixv_setup_interface(device_t dev, struct adapter *adapter)
1525 INIT_DEBUGOUT("ixv_setup_interface: begin");
1527 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1529 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1530 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1531 ifp->if_baudrate = 1000000000;
1532 ifp->if_init = ixv_init;
1533 ifp->if_softc = adapter;
1534 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1535 ifp->if_ioctl = ixv_ioctl;
/* Multiqueue transmit on 8.0+; the elided #else arm uses if_start */
1536 #if __FreeBSD_version >= 800000
1537 ifp->if_transmit = ixgbe_mq_start;
1538 ifp->if_qflush = ixgbe_qflush;
1540 ifp->if_start = ixgbe_start;
1542 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1544 ether_ifattach(ifp, adapter->hw.mac.addr);
1546 adapter->max_frame_size =
1547 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1550 * Tell the upper layer(s) we support long frames.
1552 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1554 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1555 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1556 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1559 ifp->if_capabilities |= IFCAP_LRO;
1560 ifp->if_capenable = ifp->if_capabilities;
1563 * Specify the media types supported by this adapter and register
1564 * callbacks to update media and link information
1566 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1568 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1569 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1570 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixv_config_link: query link state through the shared-code check_link
 * op and, when available, run setup_link with the discovered settings.
 * Both ops are optional (NULL-checked).
 */
1576 ixv_config_link(struct adapter *adapter)
1578 struct ixgbe_hw *hw = &adapter->hw;
1580 u32 autoneg, err = 0;
1581 if (hw->mac.ops.check_link)
1582 err = hw->mac.ops.check_link(hw, &autoneg,
1583 &adapter->link_up, FALSE);
1587 if (hw->mac.ops.setup_link)
1588 err = hw->mac.ops.setup_link(hw,
1589 autoneg, adapter->link_up);
/*
 * ixv_initialize_transmit_units: program the VF TX registers for every
 * ring -- descriptor base/length, head/tail reset, writeback threshold,
 * relaxed-ordering control -- then enable each queue.
 */
1595 /*********************************************************************
1597 * Enable transmit unit.
1599 **********************************************************************/
1601 ixv_initialize_transmit_units(struct adapter *adapter)
1603 struct tx_ring *txr = adapter->tx_rings;
1604 struct ixgbe_hw *hw = &adapter->hw;
1607 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1608 u64 tdba = txr->txdma.dma_paddr;
/* WTHRESH lives in bits 16-22 of TXDCTL; 8 enables burst writeback */
1611 /* Set WTHRESH to 8, burst writeback */
1612 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1613 txdctl |= (8 << 16);
1614 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1616 /* Set the HW Tx Head and Tail indices */
1617 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1620 /* Set Tx Tail register */
1621 txr->tail = IXGBE_VFTDT(i);
1623 /* Set Ring parameters */
1624 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1625 (tdba & 0x00000000ffffffffULL));
1626 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1627 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1628 adapter->num_tx_desc *
1629 sizeof(struct ixgbe_legacy_tx_desc));
/* Disable descriptor write relaxed ordering for this queue */
1630 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1631 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1632 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1635 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1636 txdctl |= IXGBE_TXDCTL_ENABLE;
1637 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
/*
 * ixv_initialize_receive_units: program the VF RX path -- packet-split
 * type, PF-advertised max frame size, then for each ring: disable,
 * program base/length/SRRCTL, re-enable with a bounded poll, and set
 * the tail pointer (netmap-aware when DEV_NETMAP is compiled in).
 * Finishes by configuring RX checksum offload.
 */
1644 /*********************************************************************
1646 * Setup receive registers and features.
1648 **********************************************************************/
1649 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1652 ixv_initialize_receive_units(struct adapter *adapter)
1654 struct rx_ring *rxr = adapter->rx_rings;
1655 struct ixgbe_hw *hw = &adapter->hw;
1656 struct ifnet *ifp = adapter->ifp;
1657 u32 bufsz, rxcsum, psrtype;
/* Receive buffer size tracks the MTU: 4K for jumbo, else 2K */
1660 if (ifp->if_mtu > ETHERMTU)
1661 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1663 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1665 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1666 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1667 IXGBE_PSRTYPE_L2HDR;
1669 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1671 /* Tell PF our expected packet-size */
1672 max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
1673 ixgbevf_rlpml_set_vf(hw, max_frame);
1675 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1676 u64 rdba = rxr->rxdma.dma_paddr;
1679 /* Disable the queue */
1680 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1681 rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
1682 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll (bounded to 10 iterations) for the disable to take effect */
1683 for (int j = 0; j < 10; j++) {
1684 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1685 IXGBE_RXDCTL_ENABLE)
1691 /* Setup the Base and Length of the Rx Descriptor Ring */
1692 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1693 (rdba & 0x00000000ffffffffULL));
1694 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
1696 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1697 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1699 /* Reset the ring indices */
1700 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1701 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1703 /* Set up the SRRCTL register */
1704 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1705 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1706 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1708 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1709 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1711 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1712 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1713 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1714 adapter->num_rx_desc - 1);
1716 /* Set Rx Tail register */
1717 rxr->tail = IXGBE_VFRDT(rxr->me);
1719 /* Do the queue enabling last */
1720 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1721 rxdctl |= IXGBE_RXDCTL_ENABLE;
1722 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1723 for (int k = 0; k < 10; k++) {
1724 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1725 IXGBE_RXDCTL_ENABLE)
1732 /* Set the Tail Pointer */
1735 * In netmap mode, we must preserve the buffers made
1736 * available to userspace before the if_init()
1737 * (this is true by default on the TX side, because
1738 * init makes all buffers available to userspace).
1740 * netmap_reset() and the device specific routines
1741 * (e.g. ixgbe_setup_receive_rings()) map these
1742 * buffers at the end of the NIC ring, so here we
1743 * must set the RDT (tail) register to make sure
1744 * they are not overwritten.
1746 * In this driver the NIC ring starts at RDH = 0,
1747 * RDT points to the last slot available for reception (?),
1748 * so RDT = num_rx_desc - 1 means the whole ring is available.
1750 if (ifp->if_capenable & IFCAP_NETMAP) {
1751 struct netmap_adapter *na = NA(adapter->ifp);
1752 struct netmap_kring *kring = &na->rx_rings[i];
1753 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1755 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1757 #endif /* DEV_NETMAP */
1758 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1759 adapter->num_rx_desc - 1);
1762 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1764 if (ifp->if_capenable & IFCAP_RXCSUM)
1765 rxcsum |= IXGBE_RXCSUM_PCSD;
/* Without packet-split checksum, fall back to IP payload checksum */
1767 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1768 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1770 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
/*
 * ixv_setup_vlan_support: after a soft reset has cleared hardware VLAN
 * state, re-enable VLAN stripping on each queue and replay every VLAN
 * id recorded in the software shadow VFTA via the PF mailbox.
 */
1776 ixv_setup_vlan_support(struct adapter *adapter)
1778 struct ixgbe_hw *hw = &adapter->hw;
1779 u32 ctrl, vid, vfta, retry;
1783 ** We get here thru init_locked, meaning
1784 ** a soft reset, this has already cleared
1785 ** the VFTA and other state, so if there
1786 ** have been no vlan's registered do nothing.
1788 if (adapter->num_vlans == 0)
1791 /* Enable the queues */
1792 for (int i = 0; i < adapter->num_queues; i++) {
1793 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1794 ctrl |= IXGBE_RXDCTL_VME;
1795 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1799 ** A soft reset zero's out the VFTA, so
1800 ** we need to repopulate it now.
1802 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1803 if (ixv_shadow_vfta[i] == 0)
1805 vfta = ixv_shadow_vfta[i];
1807 ** Reconstruct the vlan id's
1808 ** based on the bits set in each
1809 ** of the array ints.
1811 for ( int j = 0; j < 32; j++) {
1813 if ((vfta & (1 << j)) == 0)
1816 /* Call the shared code mailbox routine */
/* Retried in a loop: the PF mailbox call can transiently fail */
1817 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
/*
 * ixv_register_vlan: VLAN-config event handler.  Records the new vtag
 * in the software shadow VFTA and re-inits so the hardware table is
 * repopulated.  Ignores events for other interfaces and invalid tags.
 */
1826 ** This routine is run via an vlan config EVENT,
1827 ** it enables us to use the HW Filter table since
1828 ** we can get the vlan id. This just creates the
1829 ** entry in the soft version of the VFTA, init will
1830 ** repopulate the real table.
1833 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1835 struct adapter *adapter = ifp->if_softc;
1838 if (ifp->if_softc != arg) /* Not our event */
1841 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1844 IXGBE_CORE_LOCK(adapter);
/* index selects the 32-bit shadow word; the bit within it comes from
 * the low 5 bits of vtag (computed on an elided line) */
1845 index = (vtag >> 5) & 0x7F;
1847 ixv_shadow_vfta[index] |= (1 << bit);
1848 ++adapter->num_vlans;
1849 /* Re-init to load the changes */
1850 ixv_init_locked(adapter);
1851 IXGBE_CORE_UNLOCK(adapter);
/*
 * ixv_unregister_vlan: VLAN-unconfig event handler.  Clears the vtag's
 * bit from the software shadow VFTA and re-inits to flush the change
 * to hardware.  Mirrors ixv_register_vlan.
 */
1855 ** This routine is run via an vlan
1856 ** unconfig EVENT, remove our entry
1857 ** in the soft vfta.
1860 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1862 struct adapter *adapter = ifp->if_softc;
1865 if (ifp->if_softc != arg)
1868 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1871 IXGBE_CORE_LOCK(adapter);
1872 index = (vtag >> 5) & 0x7F;
1874 ixv_shadow_vfta[index] &= ~(1 << bit);
1875 --adapter->num_vlans;
1876 /* Re-init to load the changes */
1877 ixv_init_locked(adapter);
1878 IXGBE_CORE_UNLOCK(adapter);
/*
 * ixv_enable_intr: unmask VF interrupts -- program VTEIMS/VTEIAC with
 * everything except the queue/other/link bits handled separately, then
 * enable each queue's MSIX vector and flush posted writes.
 */
1882 ixv_enable_intr(struct adapter *adapter)
1884 struct ixgbe_hw *hw = &adapter->hw;
1885 struct ix_queue *que = adapter->queues;
1886 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1889 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1891 mask = IXGBE_EIMS_ENABLE_MASK;
1892 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1893 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1895 for (int i = 0; i < adapter->num_queues; i++, que++)
1896 ixv_enable_queue(adapter, que->msix);
1898 IXGBE_WRITE_FLUSH(hw);
/*
 * ixv_disable_intr: mask all VF interrupts (clear auto-clear, set the
 * full interrupt-mask-clear register) and flush the posted writes.
 */
1904 ixv_disable_intr(struct adapter *adapter)
1906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1907 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1908 IXGBE_WRITE_FLUSH(&adapter->hw);
/*
 * ixv_set_ivar: route one interrupt cause to an MSIX vector via the
 * VF IVAR registers.  Each 32-bit VTIVAR register packs four 8-bit
 * vector fields; index computes the byte position from the entry's
 * parity and the RX(0)/TX(1) type.
 */
1913 ** Setup the correct IVAR register for a particular MSIX interrupt
1914 ** - entry is the register array entry
1915 ** - vector is the MSIX vector for this queue
1916 ** - type is RX/TX/MISC
1919 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1921 struct ixgbe_hw *hw = &adapter->hw;
1924 vector |= IXGBE_IVAR_ALLOC_VAL;
1926 if (type == -1) { /* MISC IVAR */
1927 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1930 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1931 } else { /* RX/TX IVARS */
1932 index = (16 * (entry & 1)) + (8 * type);
1933 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1934 ivar &= ~(0xFF << index);
1935 ivar |= (vector << index);
1936 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
/*
 * ixv_configure_ivars: map every queue's RX and TX causes to its MSIX
 * vector, seed each vector's EITR with the default moderation value,
 * and route the mailbox cause to the dedicated vector.
 */
1941 ixv_configure_ivars(struct adapter *adapter)
1943 struct ix_queue *que = adapter->queues;
1945 for (int i = 0; i < adapter->num_queues; i++, que++) {
1946 /* First the RX queue entry */
1947 ixv_set_ivar(adapter, i, que->msix, 0);
1948 /* ... and the TX */
1949 ixv_set_ivar(adapter, i, que->msix, 1);
1950 /* Set an initial value in EITR */
1951 IXGBE_WRITE_REG(&adapter->hw,
1952 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1955 /* For the mailbox interrupt */
/* type -1 selects the MISC IVAR (see ixv_set_ivar) */
1956 ixv_set_ivar(adapter, 1, adapter->vector, -1);
/*
 * ixv_handle_mbx: taskqueue handler for the MSIX mailbox vector.
 * Runs outside interrupt context because the mailbox exchange with
 * the PF may sleep; refreshes link state and pushes it to the OS.
 */
1961 ** Tasklet handler for MSIX MBX interrupts
1962 ** - do outside interrupt since it might sleep
1965 ixv_handle_mbx(void *context, int pending)
1967 struct adapter *adapter = context;
1969 ixgbe_check_link(&adapter->hw,
1970 &adapter->link_speed, &adapter->link_up, 0);
1971 ixv_update_link_status(adapter);
/*
 * ixv_save_stats: fold the deltas accumulated since the last reset
 * baseline into the saved_reset_* totals, so statistics survive a
 * hardware reset of the never-zeroed VF counters.
 */
1975 ** The VF stats registers never have a truely virgin
1976 ** starting point, so this routine tries to make an
1977 ** artificial one, marking ground zero on attach as
1981 ixv_save_stats(struct adapter *adapter)
1983 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1984 adapter->stats.vf.saved_reset_vfgprc +=
1985 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1986 adapter->stats.vf.saved_reset_vfgptc +=
1987 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1988 adapter->stats.vf.saved_reset_vfgorc +=
1989 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1990 adapter->stats.vf.saved_reset_vfgotc +=
1991 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1992 adapter->stats.vf.saved_reset_vfmprc +=
1993 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
/*
 * ixv_init_stats: snapshot the current hardware counter values as both
 * the "last seen" values (for rollover detection in ixv_update_stats)
 * and the "base" values (the artificial zero point).  The octet
 * counters are 36-bit, split across LSB/MSB registers.
 */
1998 ixv_init_stats(struct adapter *adapter)
2000 struct ixgbe_hw *hw = &adapter->hw;
2002 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2003 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2004 adapter->stats.vf.last_vfgorc |=
2005 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2007 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2008 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2009 adapter->stats.vf.last_vfgotc |=
2010 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2012 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2014 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2015 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2016 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2017 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2018 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
/*
 * Counter-accumulation helpers: read a hardware register, detect wrap
 * relative to the last observed value (32-bit and 36-bit flavors,
 * hence the 2^32 / 2^36 rollover increments), and fold the new low
 * bits into the running 64-bit count.  Both expect `hw` in scope.
 */
2021 #define UPDATE_STAT_32(reg, last, count) \
2023 u32 current = IXGBE_READ_REG(hw, reg); \
2024 if (current < last) \
2025 count += 0x100000000LL; \
2027 count &= 0xFFFFFFFF00000000LL; \
2031 #define UPDATE_STAT_36(lsb, msb, last, count) \
2033 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
2034 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
2035 u64 current = ((cur_msb << 32) | cur_lsb); \
2036 if (current < last) \
2037 count += 0x1000000000LL; \
2039 count &= 0xFFFFFFF000000000LL; \
/*
 * ixv_update_stats: refresh all software statistics counters from the
 * hardware registers, using the rollover-aware UPDATE_STAT_* helpers
 * (32-bit packet counters, 36-bit octet counters).
 */
2044 ** ixv_update_stats - Update the board statistics counters.
2047 ixv_update_stats(struct adapter *adapter)
2049 struct ixgbe_hw *hw = &adapter->hw;
2051 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2052 adapter->stats.vf.vfgprc);
2053 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2054 adapter->stats.vf.vfgptc);
2055 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2056 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2057 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2058 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2059 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2060 adapter->stats.vf.vfmprc);
/*
 * ixv_add_stats_sysctls: publish driver, MAC and per-queue statistics
 * under this device's sysctl tree ("mac" and "que" subtrees).
 * NOTE(review): only queue 0's rings are exposed here -- TODO confirm
 * whether multi-queue counters should be added per queue.
 */
2064 * Add statistic sysctls for the VF.
2067 ixv_add_stats_sysctls(struct adapter *adapter)
2069 device_t dev = adapter->dev;
2070 struct ix_queue *que = &adapter->queues[0];
2071 struct tx_ring *txr = que->txr;
2072 struct rx_ring *rxr = que->rxr;
2074 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2075 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2076 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2077 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2079 struct sysctl_oid *stat_node, *queue_node;
2080 struct sysctl_oid_list *stat_list, *queue_list;
2082 /* Driver Statistics */
2083 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2084 CTLFLAG_RD, &adapter->dropped_pkts,
2085 "Driver dropped packets");
2086 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
2087 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
2088 "m_defrag() failed");
2089 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2090 CTLFLAG_RD, &adapter->watchdog_events,
2091 "Watchdog timeouts");
2093 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2095 "VF Statistics (read from HW registers)");
2096 stat_list = SYSCTL_CHILDREN(stat_node);
2098 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2099 CTLFLAG_RD, &stats->vfgprc,
2100 "Good Packets Received");
2101 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2102 CTLFLAG_RD, &stats->vfgorc,
2103 "Good Octets Received");
2104 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2105 CTLFLAG_RD, &stats->vfmprc,
2106 "Multicast Packets Received");
2107 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2108 CTLFLAG_RD, &stats->vfgptc,
2109 "Good Packets Transmitted");
2110 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2111 CTLFLAG_RD, &stats->vfgotc,
2112 "Good Octets Transmitted");
2114 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
2116 "Queue Statistics (collected by SW)");
2117 queue_list = SYSCTL_CHILDREN(queue_node);
2119 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2120 CTLFLAG_RD, &(que->irqs),
2122 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
2123 CTLFLAG_RD, &(rxr->rx_irq),
2124 "RX irqs on queue");
2125 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2126 CTLFLAG_RD, &(rxr->rx_packets),
2128 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2129 CTLFLAG_RD, &(rxr->rx_bytes),
2131 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2132 CTLFLAG_RD, &(rxr->rx_discarded),
2133 "Discarded RX packets");
2135 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2136 CTLFLAG_RD, &(txr->total_packets),
/* NOTE(review): tx_bytes uses SYSCTL_ADD_UINT while the other counters
 * are UQUAD -- verify txr->bytes really is a u32-width field */
2138 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
2139 CTLFLAG_RD, &(txr->bytes), 0,
2141 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2142 CTLFLAG_RD, &(txr->no_desc_avail),
2143 "# of times not enough descriptors were available during TX");
/*
 * ixv_set_sysctl_value: convenience wrapper that registers a single
 * read/write integer sysctl under this device's tree, backed by
 * *limit and seeded with `value`.
 */
2147 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2148 const char *description, int *limit, int value)
2151 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2152 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2153 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
/*
 * ixv_print_debug_info: dump per-queue IRQ/RX/TX/LRO counters and the
 * mailbox IRQ count to the console.  Debug aid only, invoked from the
 * debug sysctl handler.
 */
2156 /**********************************************************************
2158 * This routine is called only when em_display_debug_stats is enabled.
2159 * This routine provides a way to take a look at important statistics
2160 * maintained by the driver and hardware.
2162 **********************************************************************/
2164 ixv_print_debug_info(struct adapter *adapter)
2166 device_t dev = adapter->dev;
2167 struct ixgbe_hw *hw = &adapter->hw;
2168 struct ix_queue *que = adapter->queues;
2169 struct rx_ring *rxr;
2170 struct tx_ring *txr;
2171 struct lro_ctrl *lro;
2173 device_printf(dev,"Error Byte Count = %u \n",
2174 IXGBE_READ_REG(hw, IXGBE_ERRBC));
/* rxr/txr/lro are (re)pointed at this queue's rings on elided lines */
2176 for (int i = 0; i < adapter->num_queues; i++, que++) {
2180 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2181 que->msix, (long)que->irqs);
2182 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2183 rxr->me, (long long)rxr->rx_packets);
2184 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2185 rxr->me, (long)rxr->rx_bytes);
2186 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2187 rxr->me, lro->lro_queued);
2188 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2189 rxr->me, lro->lro_flushed);
2190 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2191 txr->me, (long)txr->total_packets);
2192 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2193 txr->me, (long)txr->no_desc_avail);
2196 device_printf(dev,"MBX IRQ Handled: %lu\n",
2197 (long)adapter->link_irq);
2202 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2205 struct adapter *adapter;
2208 error = sysctl_handle_int(oidp, &result, 0, req);
2210 if (error || !req->newptr)
2214 adapter = (struct adapter *) arg1;
2215 ixv_print_debug_info(adapter);