1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
45 *********************************************************************/
46 char ixv_driver_version[] = "1.4.6-k";
48 /*********************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 *********************************************************************/
/*
** PCI ID match table walked by ixv_probe(); a vendor_id of 0 terminates
** the scan (the terminating all-zero entry is on a line not visible in
** this extract). Sub-IDs of 0 act as wildcards in the probe comparison.
*/
58 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
64 /* required last entry */
68 /*********************************************************************
69 * Table of branding strings
70 *********************************************************************/
/* Branding strings, indexed by the last field of ixv_vendor_info_array. */
72 static char *ixv_strings[] = {
73 "Intel(R) PRO/10GbE Virtual Function Network Driver"
76 /*********************************************************************
78 *********************************************************************/
79 static int ixv_probe(device_t);
80 static int ixv_attach(device_t);
81 static int ixv_detach(device_t);
82 static int ixv_shutdown(device_t);
83 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
84 static void ixv_init(void *);
85 static void ixv_init_locked(struct adapter *);
86 static void ixv_stop(void *);
87 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
88 static int ixv_media_change(struct ifnet *);
89 static void ixv_identify_hardware(struct adapter *);
90 static int ixv_allocate_pci_resources(struct adapter *);
91 static int ixv_allocate_msix(struct adapter *);
92 static int ixv_setup_msix(struct adapter *);
93 static void ixv_free_pci_resources(struct adapter *);
94 static void ixv_local_timer(void *);
95 static void ixv_setup_interface(device_t, struct adapter *);
96 static void ixv_config_link(struct adapter *);
98 static void ixv_initialize_transmit_units(struct adapter *);
99 static void ixv_initialize_receive_units(struct adapter *);
101 static void ixv_enable_intr(struct adapter *);
102 static void ixv_disable_intr(struct adapter *);
103 static void ixv_set_multi(struct adapter *);
104 static void ixv_update_link_status(struct adapter *);
105 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
107 static void ixv_configure_ivars(struct adapter *);
108 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
110 static void ixv_setup_vlan_support(struct adapter *);
111 static void ixv_register_vlan(void *, struct ifnet *, u16);
112 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
114 static void ixv_save_stats(struct adapter *);
115 static void ixv_init_stats(struct adapter *);
116 static void ixv_update_stats(struct adapter *);
117 static void ixv_add_stats_sysctls(struct adapter *);
118 static void ixv_set_sysctl_value(struct adapter *, const char *,
119 const char *, int *, int);
121 /* The MSI/X Interrupt handlers */
122 static void ixv_msix_que(void *);
123 static void ixv_msix_mbx(void *);
125 /* Deferred interrupt tasklets */
126 static void ixv_handle_que(void *, int);
127 static void ixv_handle_mbx(void *, int);
131 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
134 extern void ixgbe_netmap_attach(struct adapter *adapter);
136 #include <net/netmap.h>
137 #include <sys/selinfo.h>
138 #include <dev/netmap/netmap_kern.h>
139 #endif /* DEV_NETMAP */
141 /*********************************************************************
142 * FreeBSD Device Interface Entry Points
143 *********************************************************************/
/* newbus method table: probe/attach/detach/shutdown entry points. */
145 static device_method_t ixv_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, ixv_probe),
148 DEVMETHOD(device_attach, ixv_attach),
149 DEVMETHOD(device_detach, ixv_detach),
150 DEVMETHOD(device_shutdown, ixv_shutdown),
154 static driver_t ixv_driver = {
155 "ixv", ixv_methods, sizeof(struct adapter),
158 devclass_t ixv_devclass;
159 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
160 MODULE_DEPEND(ixv, pci, 1, 1, 1);
161 MODULE_DEPEND(ixv, ether, 1, 1, 1);
/* NOTE(review): the netmap dependency below is declared for "ix", not "ixv";
** the adjacent XXX comment suggests this was already a known question. */
163 MODULE_DEPEND(ix, netmap, 1, 1, 1);
164 #endif /* DEV_NETMAP */
165 /* XXX depend on 'ix' ? */
168 ** TUNEABLE PARAMETERS:
/*
** Loader tunables (hw.ixv.*). NOTE(review): these are file-scope globals
** shared by every ixv instance in the system, not per-adapter values.
*/
171 /* Number of Queues - do not exceed MSIX vectors - 1 */
172 static int ixv_num_queues = 1;
173 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
176 ** AIM: Adaptive Interrupt Moderation
177 ** which means that the interrupt rate
178 ** is varied over time based on the
179 ** traffic for that interrupt vector
181 static int ixv_enable_aim = FALSE;
182 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
184 /* How many packets rxeof tries to clean at a time */
185 static int ixv_rx_process_limit = 256;
186 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
188 /* How many packets txeof tries to clean at a time */
189 static int ixv_tx_process_limit = 256;
190 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
192 /* Flow control setting, default to full */
193 static int ixv_flow_control = ixgbe_fc_full;
194 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
197 * Header split: this causes the hardware to DMA
198 * the header into a seperate mbuf from the payload,
199 * it can be a performance win in some workloads, but
200 * in others it actually hurts, its off by default.
202 static int ixv_header_split = FALSE;
203 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
206 ** Number of TX descriptors per ring,
207 ** setting higher than RX as this seems
208 ** the better performing choice.
210 static int ixv_txd = DEFAULT_TXD;
211 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
213 /* Number of RX descriptors per ring */
214 static int ixv_rxd = DEFAULT_RXD;
215 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
218 ** Shadow VFTA table, this is needed because
219 ** the real filter table gets cleared during
220 ** a soft reset and we need to repopulate it.
/* NOTE(review): single global shadow VFTA shared by all adapter instances;
** looks wrong for systems with more than one VF attached — confirm. */
222 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
224 /*********************************************************************
225 * Device identification routine
227 * ixv_probe determines if the driver should be loaded on
228 * adapter based on PCI vendor/device id of the adapter.
230 * return BUS_PROBE_DEFAULT on success, positive on failure
231 *********************************************************************/
/*
** Match the PCI vendor/device/sub-IDs against ixv_vendor_info_array;
** on a hit, set the device description and return BUS_PROBE_DEFAULT.
*/
234 ixv_probe(device_t dev)
236 ixgbe_vendor_info_t *ent;
238 u16 pci_vendor_id = 0;
239 u16 pci_device_id = 0;
240 u16 pci_subvendor_id = 0;
241 u16 pci_subdevice_id = 0;
242 char adapter_name[256];
245 pci_vendor_id = pci_get_vendor(dev);
246 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
249 pci_device_id = pci_get_device(dev);
250 pci_subvendor_id = pci_get_subvendor(dev);
251 pci_subdevice_id = pci_get_subdevice(dev);
253 ent = ixv_vendor_info_array;
254 while (ent->vendor_id != 0) {
255 if ((pci_vendor_id == ent->vendor_id) &&
256 (pci_device_id == ent->device_id) &&
/* sub-IDs of 0 in the table act as wildcards */
258 ((pci_subvendor_id == ent->subvendor_id) ||
259 (ent->subvendor_id == 0)) &&
261 ((pci_subdevice_id == ent->subdevice_id) ||
262 (ent->subdevice_id == 0))) {
/* NOTE(review): unbounded sprintf into a 256-byte stack buffer; inputs are
** fixed driver strings today, but snprintf would be safer. */
263 sprintf(adapter_name, "%s, Version - %s",
264 ixv_strings[ent->index],
266 device_set_desc_copy(dev, adapter_name);
267 return (BUS_PROBE_DEFAULT);
274 /*********************************************************************
275 * Device initialization routine
277 * The attach entry point is called when the driver is being loaded.
278 * This routine identifies the type of hardware, allocates all resources
279 * and initializes the hardware.
281 * return 0 on success, positive on failure
282 *********************************************************************/
/*
** Device attach: identify hardware, map BAR0, allocate queues, init the
** shared code and VF mailbox, allocate MSIX, and plumb the ifnet. On any
** failure the tail (orig. lines 446+) tears down what was built.
*/
285 ixv_attach(device_t dev)
287 struct adapter *adapter;
291 INIT_DEBUGOUT("ixv_attach: begin");
293 /* Allocate, clear, and link in our adapter structure */
294 adapter = device_get_softc(dev);
299 adapter->init_locked = ixv_init_locked;
300 adapter->stop_locked = ixv_stop;
304 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
307 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
309 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
310 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
/* NOTE(review): this exposes the *global* ixv_enable_aim tunable under each
** device's sysctl tree, so toggling it affects every ixv instance. */
312 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
314 OID_AUTO, "enable_aim", CTLFLAG_RW,
315 &ixv_enable_aim, 1, "Interrupt Moderation");
317 /* Set up the timer callout */
318 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
320 /* Determine hardware revision */
321 ixv_identify_hardware(adapter);
323 /* Do base PCI setup - map BAR0 */
324 if (ixv_allocate_pci_resources(adapter)) {
325 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
330 /* Sysctls for limiting the amount of work done in the taskqueues */
331 ixv_set_sysctl_value(adapter, "rx_processing_limit",
332 "max number of rx packets to process",
333 &adapter->rx_process_limit, ixv_rx_process_limit);
335 ixv_set_sysctl_value(adapter, "tx_processing_limit",
336 "max number of tx packets to process",
337 &adapter->tx_process_limit, ixv_tx_process_limit);
/* NOTE(review): the block below (orig. lines 339-346) is an exact duplicate
** of the rx/tx_processing_limit setup just above (orig. lines 330-337) —
** the same two sysctls are registered twice. One copy should be removed. */
339 /* Sysctls for limiting the amount of work done in the taskqueues */
340 ixv_set_sysctl_value(adapter, "rx_processing_limit",
341 "max number of rx packets to process",
342 &adapter->rx_process_limit, ixv_rx_process_limit);
344 ixv_set_sysctl_value(adapter, "tx_processing_limit",
345 "max number of tx packets to process",
346 &adapter->tx_process_limit, ixv_tx_process_limit);
348 /* Do descriptor calc and sanity checks */
349 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
350 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
351 device_printf(dev, "TXD config issue, using default!\n");
352 adapter->num_tx_desc = DEFAULT_TXD;
354 adapter->num_tx_desc = ixv_txd;
356 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
357 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
358 device_printf(dev, "RXD config issue, using default!\n");
359 adapter->num_rx_desc = DEFAULT_RXD;
361 adapter->num_rx_desc = ixv_rxd;
363 /* Allocate our TX/RX Queues */
364 if (ixgbe_allocate_queues(adapter)) {
365 device_printf(dev, "ixgbe_allocate_queues() failed!\n");
371 ** Initialize the shared code: its
372 ** at this point the mac type is set.
374 error = ixgbe_init_shared_code(hw);
376 device_printf(dev, "ixgbe_init_shared_code() failed!\n");
381 /* Setup the mailbox */
382 ixgbe_init_mbx_params_vf(hw);
384 /* Reset mbox api to 1.0 */
385 error = ixgbe_reset_hw(hw);
386 if (error == IXGBE_ERR_RESET_FAILED)
387 device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
389 device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
395 /* Negotiate mailbox API version */
396 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
398 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
403 error = ixgbe_init_hw(hw);
405 device_printf(dev, "ixgbe_init_hw() failed!\n");
410 error = ixv_allocate_msix(adapter);
412 device_printf(dev, "ixv_allocate_msix() failed!\n");
416 /* If no mac address was assigned, make a random one */
417 if (!ixv_check_ether_addr(hw->mac.addr)) {
418 u8 addr[ETHER_ADDR_LEN];
419 arc4rand(&addr, sizeof(addr), 0);
422 bcopy(addr, hw->mac.addr, sizeof(addr));
425 /* Setup OS specific network interface */
426 ixv_setup_interface(dev, adapter);
428 /* Do the stats setup */
429 ixv_save_stats(adapter);
430 ixv_init_stats(adapter);
431 ixv_add_stats_sysctls(adapter);
433 /* Register for VLAN events */
434 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
435 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
436 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
437 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
440 ixgbe_netmap_attach(adapter);
441 #endif /* DEV_NETMAP */
442 INIT_DEBUGOUT("ixv_attach: end");
/* error-unwind path: free queues and PCI resources built above */
446 ixgbe_free_transmit_structures(adapter);
447 ixgbe_free_receive_structures(adapter);
449 ixv_free_pci_resources(adapter);
454 /*********************************************************************
455 * Device removal routine
457 * The detach entry point is called when the driver is being removed.
458 * This routine stops the adapter and deallocates all the resources
459 * that were allocated for driver operation.
461 * return 0 on success, positive on failure
462 *********************************************************************/
/*
** Device detach: refuse while VLANs are trunked on the ifp, stop the
** adapter, drain and free per-queue and mailbox taskqueues, unhook VLAN
** event handlers, detach the ifnet/netmap, and release all resources.
*/
465 ixv_detach(device_t dev)
467 struct adapter *adapter = device_get_softc(dev);
468 struct ix_queue *que = adapter->queues;
470 INIT_DEBUGOUT("ixv_detach: begin");
472 /* Make sure VLANS are not using driver */
473 if (adapter->ifp->if_vlantrunk != NULL) {
474 device_printf(dev, "Vlan in use, detach first\n");
478 IXGBE_CORE_LOCK(adapter);
480 IXGBE_CORE_UNLOCK(adapter);
/* drain any pending deferred work before freeing the queues */
482 for (int i = 0; i < adapter->num_queues; i++, que++) {
484 struct tx_ring *txr = que->txr;
485 taskqueue_drain(que->tq, &txr->txq_task);
486 taskqueue_drain(que->tq, &que->que_task);
487 taskqueue_free(que->tq);
491 /* Drain the Mailbox(link) queue */
493 taskqueue_drain(adapter->tq, &adapter->link_task);
494 taskqueue_free(adapter->tq);
497 /* Unregister VLAN events */
498 if (adapter->vlan_attach != NULL)
499 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
500 if (adapter->vlan_detach != NULL)
501 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
503 ether_ifdetach(adapter->ifp);
504 callout_drain(&adapter->timer);
506 netmap_detach(adapter->ifp);
507 #endif /* DEV_NETMAP */
508 ixv_free_pci_resources(adapter);
509 bus_generic_detach(dev);
510 if_free(adapter->ifp);
512 ixgbe_free_transmit_structures(adapter);
513 ixgbe_free_receive_structures(adapter);
515 IXGBE_CORE_LOCK_DESTROY(adapter);
519 /*********************************************************************
521 * Shutdown entry point
523 **********************************************************************/
/* System shutdown hook: quiesce the adapter under the core lock. */
525 ixv_shutdown(device_t dev)
527 struct adapter *adapter = device_get_softc(dev);
528 IXGBE_CORE_LOCK(adapter);
530 IXGBE_CORE_UNLOCK(adapter);
535 /*********************************************************************
538 * ixv_ioctl is called when the user wants to configure the
541 * return 0 on success, positive on failure
542 **********************************************************************/
/*
** ifnet ioctl entry point. Handles address assignment (avoiding a full
** re-init when possible, since init renegotiates link), MTU, interface
** flags, multicast updates, media, and capability toggles; everything
** else is passed through to ether_ioctl().
*/
545 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
547 struct adapter *adapter = ifp->if_softc;
548 struct ifreq *ifr = (struct ifreq *) data;
549 #if defined(INET) || defined(INET6)
550 struct ifaddr *ifa = (struct ifaddr *) data;
551 bool avoid_reset = FALSE;
/* SIOCSIFADDR: only INET/INET6 address families get the avoid-reset path */
559 if (ifa->ifa_addr->sa_family == AF_INET)
563 if (ifa->ifa_addr->sa_family == AF_INET6)
566 #if defined(INET) || defined(INET6)
568 ** Calling init results in link renegotiation,
569 ** so we avoid doing it when possible.
572 ifp->if_flags |= IFF_UP;
573 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
575 if (!(ifp->if_flags & IFF_NOARP))
576 arp_ifinit(ifp, ifa);
578 error = ether_ioctl(ifp, command, data);
582 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
583 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
/* accepted MTU: store it, recompute max frame, and re-init the hardware */
586 IXGBE_CORE_LOCK(adapter);
587 ifp->if_mtu = ifr->ifr_mtu;
588 adapter->max_frame_size =
589 ifp->if_mtu + IXGBE_MTU_HDR;
590 ixv_init_locked(adapter);
591 IXGBE_CORE_UNLOCK(adapter);
595 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
596 IXGBE_CORE_LOCK(adapter);
597 if (ifp->if_flags & IFF_UP) {
598 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
599 ixv_init_locked(adapter);
601 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
603 adapter->if_flags = ifp->if_flags;
604 IXGBE_CORE_UNLOCK(adapter);
608 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
609 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
610 IXGBE_CORE_LOCK(adapter);
611 ixv_disable_intr(adapter);
612 ixv_set_multi(adapter);
613 ixv_enable_intr(adapter);
614 IXGBE_CORE_UNLOCK(adapter);
619 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
620 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
624 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
625 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
626 if (mask & IFCAP_HWCSUM)
627 ifp->if_capenable ^= IFCAP_HWCSUM;
628 if (mask & IFCAP_TSO4)
629 ifp->if_capenable ^= IFCAP_TSO4;
630 if (mask & IFCAP_LRO)
631 ifp->if_capenable ^= IFCAP_LRO;
632 if (mask & IFCAP_VLAN_HWTAGGING)
633 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
/* capability changes require a re-init to reprogram offloads */
634 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
635 IXGBE_CORE_LOCK(adapter);
636 ixv_init_locked(adapter);
637 IXGBE_CORE_UNLOCK(adapter);
639 VLAN_CAPABILITIES(ifp);
644 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
645 error = ether_ioctl(ifp, command, data);
652 /*********************************************************************
655 * This routine is used in two ways. It is used by the stack as
656 * init entry point in network interface structure. It is also used
657 * by the driver as a hw/sw initialization routine to get to a
660 * return 0 on success, positive on failure
661 **********************************************************************/
662 #define IXGBE_MHADD_MFS_SHIFT 16
/*
** Core (re)initialization; caller must hold the core mutex (asserted
** below). Stops the adapter, reprograms the MAC address, sets up TX/RX
** structures, offloads, VLAN support, MSIX routing, and finally enables
** interrupts and marks the ifnet running.
*/
665 ixv_init_locked(struct adapter *adapter)
667 struct ifnet *ifp = adapter->ifp;
668 device_t dev = adapter->dev;
669 struct ixgbe_hw *hw = &adapter->hw;
672 INIT_DEBUGOUT("ixv_init_locked: begin");
673 mtx_assert(&adapter->core_mtx, MA_OWNED);
674 hw->adapter_stopped = FALSE;
675 ixgbe_stop_adapter(hw);
676 callout_stop(&adapter->timer);
678 /* reprogram the RAR[0] in case user changed it. */
679 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
681 /* Get the latest mac address, User can use a LAA */
682 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
683 IXGBE_ETH_LENGTH_OF_ADDRESS);
/* NOTE(review): RAR[0] is written twice — once with IXGBE_RAH_AV above and
** again here with a literal 1 after copying the LAA; the first write looks
** redundant. Confirm against the shared-code ixgbe_set_rar() contract. */
684 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
685 hw->addr_ctrl.rar_used_count = 1;
687 /* Prepare transmit descriptors and buffers */
688 if (ixgbe_setup_transmit_structures(adapter)) {
689 device_printf(dev, "Could not setup transmit structures\n");
694 /* Reset VF and renegotiate mailbox API version */
696 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
698 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
700 ixv_initialize_transmit_units(adapter);
702 /* Setup Multicast table */
703 ixv_set_multi(adapter);
706 ** Determine the correct mbuf pool
707 ** for doing jumbo/headersplit
709 if (ifp->if_mtu > ETHERMTU)
710 adapter->rx_mbuf_sz = MJUMPAGESIZE;
712 adapter->rx_mbuf_sz = MCLBYTES;
714 /* Prepare receive descriptors and buffers */
715 if (ixgbe_setup_receive_structures(adapter)) {
716 device_printf(dev, "Could not setup receive structures\n");
721 /* Configure RX settings */
722 ixv_initialize_receive_units(adapter);
724 /* Set the various hardware offload abilities */
725 ifp->if_hwassist = 0;
726 if (ifp->if_capenable & IFCAP_TSO4)
727 ifp->if_hwassist |= CSUM_TSO;
728 if (ifp->if_capenable & IFCAP_TXCSUM) {
729 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
730 #if __FreeBSD_version >= 800000
731 ifp->if_hwassist |= CSUM_SCTP;
735 /* Set up VLAN offload and filter */
736 ixv_setup_vlan_support(adapter);
738 /* Set up MSI/X routing */
739 ixv_configure_ivars(adapter);
741 /* Set up auto-mask */
742 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
744 /* Set moderation on the Link interrupt */
745 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
748 ixv_init_stats(adapter);
750 /* Config/Enable Link */
751 ixv_config_link(adapter);
/* start the watchdog/stats timer, one tick from now */
754 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
756 /* And now turn on interrupts */
757 ixv_enable_intr(adapter);
759 /* Now inform the stack we're ready */
760 ifp->if_drv_flags |= IFF_DRV_RUNNING;
761 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Unlocked ifnet init wrapper: takes the core lock around ixv_init_locked(). */
769 struct adapter *adapter = arg;
771 IXGBE_CORE_LOCK(adapter);
772 ixv_init_locked(adapter);
773 IXGBE_CORE_UNLOCK(adapter);
780 ** MSIX Interrupt Handlers and Tasklets
/* Unmask the MSIX interrupt for the given queue vector via VTEIMS. */
785 ixv_enable_queue(struct adapter *adapter, u32 vector)
787 struct ixgbe_hw *hw = &adapter->hw;
788 u32 queue = 1 << vector;
791 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
792 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Mask the MSIX interrupt for the given queue vector via VTEIMC. */
796 ixv_disable_queue(struct adapter *adapter, u32 vector)
798 struct ixgbe_hw *hw = &adapter->hw;
/* NOTE(review): `(u64)(1 << vector)` performs the shift in 32-bit int and
** only then widens — undefined behavior for vector >= 31 and it can never
** set bits 32-63. Should be `(u64)1 << vector` (cast before shifting). */
799 u64 queue = (u64)(1 << vector);
802 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
803 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
/* Force an interrupt (via VTEICS) on each queue whose bit is set in `queues`. */
807 ixv_rearm_queues(struct adapter *adapter, u64 queues)
809 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
810 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
/*
** Deferred (taskqueue) per-queue handler: clean RX, restart TX, and either
** re-enqueue itself if more work remains or re-enable the queue interrupt.
*/
815 ixv_handle_que(void *context, int pending)
817 struct ix_queue *que = context;
818 struct adapter *adapter = que->adapter;
819 struct tx_ring *txr = que->txr;
820 struct ifnet *ifp = adapter->ifp;
823 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
824 more = ixgbe_rxeof(que);
/* kick the appropriate TX path: multiqueue (drbr) on >= 8.0, legacy if_snd otherwise */
827 #if __FreeBSD_version >= 800000
828 if (!drbr_empty(ifp, txr->br))
829 ixgbe_mq_start_locked(ifp, txr);
831 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
832 ixgbe_start_locked(txr, ifp);
834 IXGBE_TX_UNLOCK(txr);
/* more RX pending: reschedule ourselves instead of unmasking the interrupt */
836 taskqueue_enqueue(que->tq, &que->que_task);
841 /* Reenable this interrupt */
842 ixv_enable_queue(adapter, que->msix);
846 /*********************************************************************
848 * MSI Queue Interrupt Service routine
850 **********************************************************************/
/*
** MSIX per-queue interrupt handler: mask the vector, clean RX/TX, run
** optional adaptive interrupt moderation (AIM), then either defer the
** remainder to the taskqueue or unmask the vector.
*/
852 ixv_msix_que(void *arg)
854 struct ix_queue *que = arg;
855 struct adapter *adapter = que->adapter;
856 struct ifnet *ifp = adapter->ifp;
857 struct tx_ring *txr = que->txr;
858 struct rx_ring *rxr = que->rxr;
/* auto-mask this vector; re-enabled at the bottom or by ixv_handle_que() */
862 ixv_disable_queue(adapter, que->msix);
865 more = ixgbe_rxeof(que);
870 ** Make certain that if the stack
871 ** has anything queued the task gets
872 ** scheduled to handle it.
874 #ifdef IXGBE_LEGACY_TX
875 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
876 ixgbe_start_locked(txr, ifp)
878 if (!drbr_empty(adapter->ifp, txr->br))
879 ixgbe_mq_start_locked(ifp, txr);
881 IXGBE_TX_UNLOCK(txr);
/* AIM disabled (the default): skip straight to the rearm decision */
885 if (ixv_enable_aim == FALSE)
888 ** Do Adaptive Interrupt Moderation:
889 ** - Write out last calculated setting
890 ** - Calculate based on average size over
891 ** the last interval.
893 if (que->eitr_setting)
894 IXGBE_WRITE_REG(&adapter->hw,
895 IXGBE_VTEITR(que->msix),
898 que->eitr_setting = 0;
900 /* Idle, do nothing */
901 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* estimate average frame size on each ring since the last interrupt */
904 if ((txr->bytes) && (txr->packets))
905 newitr = txr->bytes/txr->packets;
906 if ((rxr->bytes) && (rxr->packets))
908 (rxr->bytes / rxr->packets));
909 newitr += 24; /* account for hardware frame, crc */
911 /* set an upper boundary */
912 newitr = min(newitr, 3000);
914 /* Be nice to the mid range */
915 if ((newitr > 300) && (newitr < 1200))
916 newitr = (newitr / 3);
918 newitr = (newitr / 2);
/* NOTE(review): mirrors the value into the upper 16 bits before saving —
** presumably the VTEITR interval field layout; confirm against datasheet. */
920 newitr |= newitr << 16;
922 /* save for next interrupt */
923 que->eitr_setting = newitr;
/* more work pending: defer to taskqueue; otherwise unmask the vector */
933 taskqueue_enqueue(que->tq, &que->que_task);
934 else /* Reenable this interrupt */
935 ixv_enable_queue(adapter, que->msix);
/*
** MSIX mailbox/link interrupt: read and ack the cause register, defer
** link-status-change handling to the link taskqueue, then re-enable
** the "other" (mailbox) interrupt.
*/
940 ixv_msix_mbx(void *arg)
942 struct adapter *adapter = arg;
943 struct ixgbe_hw *hw = &adapter->hw;
948 /* First get the cause */
949 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
950 /* Clear interrupt with write */
951 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
953 /* Link status change */
954 if (reg & IXGBE_EICR_LSC)
955 taskqueue_enqueue(adapter->tq, &adapter->link_task);
957 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
961 /*********************************************************************
963 * Media Ioctl callback
965 * This routine is called whenever the user queries the status of
966 * the interface using ifconfig.
968 **********************************************************************/
/*
** ifmedia status callback (ifconfig query): refresh link state under the
** core lock and report active media/duplex.
*/
970 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
972 struct adapter *adapter = ifp->if_softc;
974 INIT_DEBUGOUT("ixv_media_status: begin");
975 IXGBE_CORE_LOCK(adapter);
976 ixv_update_link_status(adapter);
978 ifmr->ifm_status = IFM_AVALID;
979 ifmr->ifm_active = IFM_ETHER;
/* no link: report valid-but-inactive and bail */
981 if (!adapter->link_active) {
982 IXGBE_CORE_UNLOCK(adapter);
986 ifmr->ifm_status |= IFM_ACTIVE;
988 switch (adapter->link_speed) {
989 case IXGBE_LINK_SPEED_1GB_FULL:
990 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
/* NOTE(review): the 10GB case sets only IFM_FDX with no media subtype —
** presumably intentional (VF cannot know the PF's PHY type); confirm. */
992 case IXGBE_LINK_SPEED_10GB_FULL:
993 ifmr->ifm_active |= IFM_FDX;
997 IXGBE_CORE_UNLOCK(adapter);
1002 /*********************************************************************
1004 * Media Ioctl callback
1006 * This routine is called when the user changes speed/duplex using
1007 * media/mediopt option with ifconfig.
1009 **********************************************************************/
/*
** ifmedia change callback: only IFM_AUTO is supported on the VF; any
** other requested subtype is rejected with a console message.
*/
1011 ixv_media_change(struct ifnet * ifp)
1013 struct adapter *adapter = ifp->if_softc;
1014 struct ifmedia *ifm = &adapter->media;
1016 INIT_DEBUGOUT("ixv_media_change: begin");
1018 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1021 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1025 device_printf(adapter->dev, "Only auto media type\n");
1033 /*********************************************************************
1036 * This routine is called whenever multicast address list is updated.
1038 **********************************************************************/
1039 #define IXGBE_RAR_ENTRIES 16
/*
** Rebuild the hardware multicast filter from the ifnet's multicast list,
** packing the addresses into a flat byte array consumed via the
** ixv_mc_array_itr() iterator.
*/
1042 ixv_set_multi(struct adapter *adapter)
1044 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1046 struct ifmultiaddr *ifma;
1048 struct ifnet *ifp = adapter->ifp;
1050 IOCTL_DEBUGOUT("ixv_set_multi: begin");
/* multicast-list locking API differs before/after FreeBSD 8.0 */
1052 #if __FreeBSD_version < 800000
1055 if_maddr_rlock(ifp);
1057 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1058 if (ifma->ifma_addr->sa_family != AF_LINK)
1060 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1061 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1062 IXGBE_ETH_LENGTH_OF_ADDRESS);
1065 #if __FreeBSD_version < 800000
1066 IF_ADDR_UNLOCK(ifp);
1068 if_maddr_runlock(ifp);
1073 ixgbe_update_mc_addr_list(&adapter->hw,
1074 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1080 * This is an iterator function now needed by the multicast
1081 * shared code. It simply feeds the shared code routine the
1082 * addresses in the array of ixv_set_multi() one by one.
/*
** Iterator callback for ixgbe_update_mc_addr_list(): returns the current
** address from the flat mta[] array and advances *update_ptr by one
** Ethernet-address stride.
*/
1085 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1087 u8 *addr = *update_ptr;
1091 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1092 *update_ptr = newptr;
1096 /*********************************************************************
1099 * This routine checks for link status,updates statistics,
1100 * and runs the watchdog check.
1102 **********************************************************************/
/*
** 1 Hz watchdog callout (core mutex held by callout_init_mtx): refresh
** link state and stats, detect hung TX queues, rearm queues with pending
** work, and trigger a full re-init only when ALL queues are hung.
*/
1105 ixv_local_timer(void *arg)
1107 struct adapter *adapter = arg;
1108 device_t dev = adapter->dev;
1109 struct ix_queue *que = adapter->queues;
1113 mtx_assert(&adapter->core_mtx, MA_OWNED);
1115 ixv_update_link_status(adapter);
1118 ixv_update_stats(adapter);
1121 ** Check the TX queues status
1122 ** - mark hung queues so we don't schedule on them
1123 ** - watchdog only if all queues show hung
1125 for (int i = 0; i < adapter->num_queues; i++, que++) {
1126 /* Keep track of queues with work for soft irq */
1128 queues |= ((u64)1 << que->me);
1130 ** Each time txeof runs without cleaning, but there
1131 ** are uncleaned descriptors it increments busy. If
1132 ** we get to the MAX we declare it hung.
/* NOTE(review): hung test reads que->busy here but the MAX branch below
** writes que->txr->busy — verify these refer to the same counter. */
1134 if (que->busy == IXGBE_QUEUE_HUNG) {
1136 /* Mark the queue as inactive */
1137 adapter->active_queues &= ~((u64)1 << que->me);
1140 /* Check if we've come back from hung */
1141 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1142 adapter->active_queues |= ((u64)1 << que->me);
1144 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1145 device_printf(dev,"Warning queue %d "
1146 "appears to be hung!\n", i);
1147 que->txr->busy = IXGBE_QUEUE_HUNG;
1153 /* Only truely watchdog if all queues show hung */
1154 if (hung == adapter->num_queues)
1156 else if (queues != 0) { /* Force an IRQ on queues with work */
1157 ixv_rearm_queues(adapter, queues);
1160 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* watchdog path: mark the interface down and reinitialize */
1164 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1165 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1166 adapter->watchdog_events++;
1167 ixv_init_locked(adapter);
1171 ** Note: this routine updates the OS on the link state
1172 ** the real check of the hardware only happens with
1173 ** a link interrupt.
/*
** Propagate the cached link state (adapter->link_up, set by the link
** interrupt path) to the OS via if_link_state_change(), logging
** transitions exactly once per edge.
*/
1176 ixv_update_link_status(struct adapter *adapter)
1178 struct ifnet *ifp = adapter->ifp;
1179 device_t dev = adapter->dev;
1181 if (adapter->link_up){
1182 if (adapter->link_active == FALSE) {
/* link_speed == 128 encodes 10 Gbps here; anything else reports 1 Gbps */
1184 device_printf(dev,"Link is up %d Gbps %s \n",
1185 ((adapter->link_speed == 128)? 10:1),
1187 adapter->link_active = TRUE;
1188 if_link_state_change(ifp, LINK_STATE_UP);
1190 } else { /* Link down */
1191 if (adapter->link_active == TRUE) {
1193 device_printf(dev,"Link is Down\n");
1194 if_link_state_change(ifp, LINK_STATE_DOWN);
1195 adapter->link_active = FALSE;
1203 /*********************************************************************
1205 * This routine disables all traffic on the adapter by issuing a
1206 * global reset on the MAC and deallocates TX/RX buffers.
1208 **********************************************************************/
/*
** Stop all traffic (core mutex held, asserted below): disable interrupts,
** clear the RUNNING/OACTIVE flags, and reset the MAC via the shared code.
*/
1214 struct adapter *adapter = arg;
1215 struct ixgbe_hw *hw = &adapter->hw;
1218 mtx_assert(&adapter->core_mtx, MA_OWNED);
1220 INIT_DEBUGOUT("ixv_stop: begin\n");
1221 ixv_disable_intr(adapter);
1223 /* Tell the stack that the interface is no longer active */
1224 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* clear adapter_stopped first so ixgbe_stop_adapter() runs its full path */
1227 adapter->hw.adapter_stopped = FALSE;
1228 ixgbe_stop_adapter(hw);
1229 callout_stop(&adapter->timer);
1231 /* reprogram the RAR[0] in case user changed it. */
1232 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1238 /*********************************************************************
1240 * Determine hardware revision.
1242 **********************************************************************/
/*
** Cache PCI config-space identity into the shared-code hw struct and let
** the shared code derive the MAC type from it.
*/
1244 ixv_identify_hardware(struct adapter *adapter)
1246 device_t dev = adapter->dev;
1247 struct ixgbe_hw *hw = &adapter->hw;
1250 ** Make sure BUSMASTER is set, on a VM under
1251 ** KVM it may not be and will break things.
1253 pci_enable_busmaster(dev);
1255 /* Save off the information about this board */
1256 hw->vendor_id = pci_get_vendor(dev);
1257 hw->device_id = pci_get_device(dev);
1258 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1259 hw->subsystem_vendor_id =
1260 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1261 hw->subsystem_device_id =
1262 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1264 /* We need this to determine device-specific things */
1265 ixgbe_set_mac_type(hw);
1267 /* Set the right number of segments */
1268 adapter->num_segs = IXGBE_82599_SCATTER;
1273 /*********************************************************************
1275 * Setup MSIX Interrupt resources and handlers
1277 **********************************************************************/
/* Allocate one MSIX IRQ + handler + taskqueue per queue pair, then a
** final vector for the PF mailbox. Error-return lines after the
** device_printf() calls are missing from this view. */
1279 ixv_allocate_msix(struct adapter *adapter)
1281 device_t dev = adapter->dev;
1282 struct ix_queue *que = adapter->queues;
1283 struct tx_ring *txr = adapter->tx_rings;
1284 int error, rid, vector = 0;
1286 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1288 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1289 RF_SHAREABLE | RF_ACTIVE);
1290 if (que->res == NULL) {
1291 device_printf(dev,"Unable to allocate"
1292 " bus resource: que interrupt [%d]\n", vector);
1295 /* Set the handler function */
1296 error = bus_setup_intr(dev, que->res,
1297 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1298 ixv_msix_que, que, &que->tag);
1301 device_printf(dev, "Failed to register QUE handler");
1304 #if __FreeBSD_version >= 800504
1305 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* NOTE(review): the shift is evaluated in int before the u64 cast,
** which is undefined if que->msix >= 31; should be
** ((u64)1 << que->msix). Harmless only while queue counts are small. */
1308 adapter->active_queues |= (u64)(1 << que->msix);
1310 ** Bind the msix vector, and thus the
1311 ** ring to the corresponding cpu.
1313 if (adapter->num_queues > 1)
1314 bus_bind_intr(dev, que->res, i);
/* Deferred-start task for TX and the per-queue service task, each
** dispatched on a dedicated fast taskqueue thread */
1315 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1316 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1317 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1318 taskqueue_thread_enqueue, &que->tq);
1319 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1320 device_get_nameunit(adapter->dev));
/* Now the mailbox (link) interrupt vector */
1325 adapter->res = bus_alloc_resource_any(dev,
1326 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1327 if (!adapter->res) {
1328 device_printf(dev,"Unable to allocate"
1329 " bus resource: MBX interrupt [%d]\n", rid);
1332 /* Set the mbx handler function */
1333 error = bus_setup_intr(dev, adapter->res,
1334 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1335 ixv_msix_mbx, adapter, &adapter->tag);
1337 adapter->res = NULL;
1338 device_printf(dev, "Failed to register LINK handler");
1341 #if __FreeBSD_version >= 800504
1342 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1344 adapter->vector = vector;
1345 /* Tasklets for Mailbox */
1346 TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1347 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1348 taskqueue_thread_enqueue, &adapter->tq);
1349 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1350 device_get_nameunit(adapter->dev));
1352 ** Due to a broken design QEMU will fail to properly
1353 ** enable the guest for MSIX unless the vectors in
1354 ** the table are all set up, so we must rewrite the
1355 ** ENABLE in the MSIX control register again at this
1356 ** point to cause it to successfully initialize us.
1358 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
/* rid is reused here as a config-space offset: capability base +
** PCIR_MSIX_CTRL, then a read-modify-write of the 16-bit control */
1360 pci_find_cap(dev, PCIY_MSIX, &rid);
1361 rid += PCIR_MSIX_CTRL;
1362 msix_ctrl = pci_read_config(dev, rid, 2);
1363 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1364 pci_write_config(dev, rid, msix_ctrl, 2);
1371 * Setup MSIX resources, note that the VF
1372 * device MUST use MSIX, there is no fallback.
/* Map the MSIX table BAR and allocate num_queues + 1 vectors
** (queues plus mailbox). Returns the vector count on success;
** the success-return and some conditional lines are missing from
** this view -- presumably it returns ENXIO on failure. */
1375 ixv_setup_msix(struct adapter *adapter)
1377 device_t dev = adapter->dev;
1378 int rid, want, msgs;
1381 /* Must have at least 2 MSIX vectors */
1382 msgs = pci_msix_count(dev);
1386 adapter->msix_mem = bus_alloc_resource_any(dev,
1387 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1388 if (adapter->msix_mem == NULL) {
1389 device_printf(adapter->dev,
1390 "Unable to map MSIX table \n");
1395 ** Want vectors for the queues,
1396 ** plus an additional for mailbox.
1398 want = adapter->num_queues + 1;
/* Fewer vectors available than wanted: shrink the queue count so
** one vector is still left over for the mailbox (the guarding
** comparison is on a line missing from this view) */
1401 adapter->num_queues = msgs - 1;
1404 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
1405 device_printf(adapter->dev,
1406 "Using MSIX interrupts with %d vectors\n", want);
1409 /* Release in case alloc was insufficient */
1410 pci_release_msi(dev);
1412 if (adapter->msix_mem != NULL) {
1413 bus_release_resource(dev, SYS_RES_MEMORY,
1414 rid, adapter->msix_mem);
1415 adapter->msix_mem = NULL;
1417 device_printf(adapter->dev,"MSIX config error\n");
/* Map BAR(0) register space, record the bus tag/handle for the
** shared-code register macros, then configure MSIX. */
1423 ixv_allocate_pci_resources(struct adapter *adapter)
1426 device_t dev = adapter->dev;
1429 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1432 if (!(adapter->pci_mem)) {
1433 device_printf(dev, "Unable to allocate bus resource: memory\n");
1437 adapter->osdep.mem_bus_space_tag =
1438 rman_get_bustag(adapter->pci_mem);
1439 adapter->osdep.mem_bus_space_handle =
1440 rman_get_bushandle(adapter->pci_mem);
/* hw_addr stores a pointer to the bus-space handle itself; the
** register access macros presumably go through osdep rather than
** dereferencing this directly -- confirm in the osdep header */
1441 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1443 /* Pick up the tuneable queues */
1444 adapter->num_queues = ixv_num_queues;
1445 adapter->hw.back = adapter;
1448 ** Now setup MSI/X, should
1449 ** return us the number of
1450 ** configured vectors.
1452 adapter->msix = ixv_setup_msix(adapter);
1453 if (adapter->msix == ENXIO)
/* Tear down, in reverse order of allocation: per-queue interrupts,
** the mailbox interrupt, MSI state, the MSIX table BAR, and the
** register BAR. Safe to call from a failed attach. */
1460 ixv_free_pci_resources(struct adapter * adapter)
1462 struct ix_queue *que = adapter->queues;
1463 device_t dev = adapter->dev;
/* NOTE(review): memrid is derived from the 82598 MSIX BAR constant;
** confirm it matches the rid used in ixv_setup_msix() for VF parts */
1466 memrid = PCIR_BAR(MSIX_82598_BAR);
1469 ** There is a slight possibility of a failure mode
1470 ** in attach that will result in entering this function
1471 ** before interrupt resources have been initialized, and
1472 ** in that case we do not want to execute the loops below
1473 ** We can detect this reliably by the state of the adapter
1476 if (adapter->res == NULL)
1480 ** Release all msix queue resources:
1482 for (int i = 0; i < adapter->num_queues; i++, que++) {
/* IRQ rids are 1-based: vector + 1 */
1483 rid = que->msix + 1;
1484 if (que->tag != NULL) {
1485 bus_teardown_intr(dev, que->res, que->tag);
1488 if (que->res != NULL)
1489 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1493 /* Clean the Legacy or Link interrupt last */
1494 if (adapter->vector) /* we are doing MSIX */
1495 rid = adapter->vector + 1;
1497 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1499 if (adapter->tag != NULL) {
1500 bus_teardown_intr(dev, adapter->res, adapter->tag);
1501 adapter->tag = NULL;
1503 if (adapter->res != NULL)
1504 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1508 pci_release_msi(dev);
1510 if (adapter->msix_mem != NULL)
1511 bus_release_resource(dev, SYS_RES_MEMORY,
1512 memrid, adapter->msix_mem);
1514 if (adapter->pci_mem != NULL)
1515 bus_release_resource(dev, SYS_RES_MEMORY,
1516 PCIR_BAR(0), adapter->pci_mem);
1521 /*********************************************************************
1523 * Setup networking device structure and register an interface.
1525 **********************************************************************/
/* Allocate the ifnet, wire up driver entry points, attach to the
** Ethernet layer, advertise capabilities, and register ifmedia. */
1527 ixv_setup_interface(device_t dev, struct adapter *adapter)
1531 INIT_DEBUGOUT("ixv_setup_interface: begin");
1533 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1535 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1536 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
/* Baudrate is a nominal 1 Gbps placeholder; real link speed is
** reported via ixv_update_link_status() */
1537 ifp->if_baudrate = 1000000000;
1538 ifp->if_init = ixv_init;
1539 ifp->if_softc = adapter;
1540 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1541 ifp->if_ioctl = ixv_ioctl;
1542 #if __FreeBSD_version >= 800000
/* Multiqueue transmit path on 8.0+, legacy if_start otherwise */
1543 ifp->if_transmit = ixgbe_mq_start;
1544 ifp->if_qflush = ixgbe_qflush;
1546 ifp->if_start = ixgbe_start;
1548 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1550 ether_ifattach(ifp, adapter->hw.mac.addr);
1552 adapter->max_frame_size =
1553 ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
1556 * Tell the upper layer(s) we support long frames.
1558 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1560 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1561 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1562 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1565 ifp->if_capabilities |= IFCAP_LRO;
/* Enable everything we advertise by default */
1566 ifp->if_capenable = ifp->if_capabilities;
1569 * Specify the media types supported by this adapter and register
1570 * callbacks to update media and link information
1572 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1574 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1575 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/* Query link state via the shared-code check_link op (which for a VF
** goes through the PF mailbox); result lands in adapter->link_up. */
1581 ixv_config_link(struct adapter *adapter)
1583 struct ixgbe_hw *hw = &adapter->hw;
1586 if (hw->mac.ops.check_link)
1587 hw->mac.ops.check_link(hw, &autoneg,
1588 &adapter->link_up, FALSE);
1592 /*********************************************************************
1594 * Enable transmit unit.
1596 **********************************************************************/
/* Program each VF TX queue: writeback threshold, head/tail reset,
** descriptor ring base/length, relaxed-ordering control, then set
** the queue ENABLE bit in TXDCTL last. */
1598 ixv_initialize_transmit_units(struct adapter *adapter)
1600 struct tx_ring *txr = adapter->tx_rings;
1601 struct ixgbe_hw *hw = &adapter->hw;
1604 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1605 u64 tdba = txr->txdma.dma_paddr;
1608 /* Set WTHRESH to 8, burst writeback */
1609 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
/* WTHRESH field occupies bits 16+ of TXDCTL */
1610 txdctl |= (8 << 16);
1611 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1613 /* Set the HW Tx Head and Tail indices */
1614 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1615 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1617 /* Set Tx Tail register */
1618 txr->tail = IXGBE_VFTDT(i);
1620 /* Set Ring parameters */
1621 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1622 (tdba & 0x00000000ffffffffULL));
1623 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1624 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1625 adapter->num_tx_desc *
1626 sizeof(struct ixgbe_legacy_tx_desc));
/* Disable write-relaxed-ordering on descriptor writeback */
1627 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1628 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1629 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
/* Enable the queue last, after all ring parameters are in place */
1632 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1633 txdctl |= IXGBE_TXDCTL_ENABLE;
1634 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1641 /*********************************************************************
1643 * Setup receive registers and features.
1645 **********************************************************************/
1646 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/* Program packet-split type, tell the PF our max frame size, then for
** each RX queue: disable, set ring base/length, reset indices,
** program SRRCTL buffer sizing, and re-enable -- polling RXDCTL
** briefly for the enable/disable to take effect. Finally set RXCSUM. */
1649 ixv_initialize_receive_units(struct adapter *adapter)
1651 struct rx_ring *rxr = adapter->rx_rings;
1652 struct ixgbe_hw *hw = &adapter->hw;
1653 struct ifnet *ifp = adapter->ifp;
1654 u32 bufsz, rxcsum, psrtype;
/* Buffer size encoded in 1KB units (BSIZEPKT shift): 4KB clusters
** for jumbo MTU, 2KB otherwise */
1656 if (ifp->if_mtu > ETHERMTU)
1657 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1659 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1661 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1662 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1663 IXGBE_PSRTYPE_L2HDR;
1665 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1667 /* Tell PF our max_frame size */
1668 ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);
1670 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1671 u64 rdba = rxr->rxdma.dma_paddr;
1674 /* Disable the queue */
1675 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1676 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1677 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll (up to 10 tries) until the ENABLE bit reads back clear;
** the loop body (msec_delay/break) is missing from this view */
1678 for (int j = 0; j < 10; j++) {
1679 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1680 IXGBE_RXDCTL_ENABLE)
1686 /* Setup the Base and Length of the Rx Descriptor Ring */
1687 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1688 (rdba & 0x00000000ffffffffULL))
1691 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1692 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1694 /* Reset the ring indices */
1695 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1696 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1698 /* Set up the SRRCTL register */
1699 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1700 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1701 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
/* (the "reg |= bufsz;" line is missing from this view) */
1703 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1704 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1706 /* Capture Rx Tail register */
1707 rxr->tail = IXGBE_VFRDT(rxr->me);
1709 /* Do the queue enabling last */
1710 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1711 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll until ENABLE reads back set */
1712 for (int k = 0; k < 10; k++) {
1713 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1714 IXGBE_RXDCTL_ENABLE)
1721 /* Set the Tail Pointer */
1724 * In netmap mode, we must preserve the buffers made
1725 * available to userspace before the if_init()
1726 * (this is true by default on the TX side, because
1727 * init makes all buffers available to userspace).
1729 * netmap_reset() and the device specific routines
1730 * (e.g. ixgbe_setup_receive_rings()) map these
1731 * buffers at the end of the NIC ring, so here we
1732 * must set the RDT (tail) register to make sure
1733 * they are not overwritten.
1735 * In this driver the NIC ring starts at RDH = 0,
1736 * RDT points to the last slot available for reception (?),
1737 * so RDT = num_rx_desc - 1 means the whole ring is available.
1739 if (ifp->if_capenable & IFCAP_NETMAP) {
1740 struct netmap_adapter *na = NA(adapter->ifp);
1741 struct netmap_kring *kring = &na->rx_rings[i];
/* Leave room at the tail for buffers netmap already owns */
1742 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1744 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1746 #endif /* DEV_NETMAP */
1747 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1748 adapter->num_rx_desc - 1);
/* NOTE(review): RXCSUM is a PF-owned register; a VF read/write here
** may be ignored by hardware -- confirm against the datasheet */
1751 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1753 if (ifp->if_capenable & IFCAP_RXCSUM)
1754 rxcsum |= IXGBE_RXCSUM_PCSD;
1756 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1757 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1759 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
/* Re-enable VLAN tag stripping on every RX queue and repopulate the
** hardware VFTA (via PF mailbox) from the driver's shadow table,
** since a soft reset cleared the real one. */
1765 ixv_setup_vlan_support(struct adapter *adapter)
1767 struct ixgbe_hw *hw = &adapter->hw;
1768 u32 ctrl, vid, vfta, retry;
1769 struct rx_ring *rxr;
1772 ** We get here thru init_locked, meaning
1773 ** a soft reset, this has already cleared
1774 ** the VFTA and other state, so if there
1775 ** have been no vlan's registered do nothing.
1777 if (adapter->num_vlans == 0)
1780 /* Enable the queues */
1781 for (int i = 0; i < adapter->num_queues; i++) {
/* VME bit in RXDCTL turns on VLAN tag stripping for this queue */
1782 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1783 ctrl |= IXGBE_RXDCTL_VME;
1784 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1786 * Let Rx path know that it needs to store VLAN tag
1787 * as part of extra mbuf info.
1789 rxr = &adapter->rx_rings[i];
1790 rxr->vtag_strip = TRUE;
1794 ** A soft reset zero's out the VFTA, so
1795 ** we need to repopulate it now.
1797 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1798 if (ixv_shadow_vfta[i] == 0)
1800 vfta = ixv_shadow_vfta[i];
1802 ** Reconstruct the vlan id's
1803 ** based on the bits set in each
1804 ** of the array ints.
1806 for (int j = 0; j < 32; j++) {
1808 if ((vfta & (1 << j)) == 0)
/* (the "vid = (i * 32) + j;" line is missing from this view) */
1811 /* Call the shared code mailbox routine */
/* Retry while the PF mailbox op fails; retry-limit lines are
** missing from this view */
1812 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1821 ** This routine is run via an vlan config EVENT,
1822 ** it enables us to use the HW Filter table since
1823 ** we can get the vlan id. This just creates the
1824 ** entry in the soft version of the VFTA, init will
1825 ** repopulate the real table.
1828 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1830 struct adapter *adapter = ifp->if_softc;
1833 if (ifp->if_softc != arg) /* Not our event */
1836 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1839 IXGBE_CORE_LOCK(adapter);
/* Shadow VFTA: 32 VLAN ids per u32 word; word index = vtag >> 5,
** the "bit = vtag & 0x1F;" line is missing from this view */
1840 index = (vtag >> 5) & 0x7F;
1842 ixv_shadow_vfta[index] |= (1 << bit);
1843 ++adapter->num_vlans;
1844 /* Re-init to load the changes */
1845 ixv_init_locked(adapter);
1846 IXGBE_CORE_UNLOCK(adapter);
1850 ** This routine is run via an vlan
1851 ** unconfig EVENT, remove our entry
1852 ** in the soft vfta.
1855 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1857 struct adapter *adapter = ifp->if_softc;
1860 if (ifp->if_softc != arg)
1863 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1866 IXGBE_CORE_LOCK(adapter);
/* Mirror of ixv_register_vlan: clear the bit in the shadow VFTA
** (the "bit = vtag & 0x1F;" line is missing from this view) */
1867 index = (vtag >> 5) & 0x7F;
1869 ixv_shadow_vfta[index] &= ~(1 << bit);
1870 --adapter->num_vlans;
1871 /* Re-init to load the changes */
1872 ixv_init_locked(adapter);
1873 IXGBE_CORE_UNLOCK(adapter);
/* Unmask interrupts: set VTEIMS, program auto-clear (VTEIAC) for the
** queue causes, enable each queue's vector, then flush posted writes. */
1877 ixv_enable_intr(struct adapter *adapter)
1879 struct ixgbe_hw *hw = &adapter->hw;
1880 struct ix_queue *que = adapter->queues;
1881 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1884 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Auto-clear everything except the OTHER/LSC (mailbox/link) causes */
1886 mask = IXGBE_EIMS_ENABLE_MASK;
1887 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1888 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1890 for (int i = 0; i < adapter->num_queues; i++, que++)
1891 ixv_enable_queue(adapter, que->msix);
1893 IXGBE_WRITE_FLUSH(hw);
/* Mask all interrupts: clear auto-clear, set every bit in the
** interrupt mask clear register, and flush the posted writes. */
1899 ixv_disable_intr(struct adapter *adapter)
1901 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1902 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1903 IXGBE_WRITE_FLUSH(&adapter->hw);
1908 ** Setup the correct IVAR register for a particular MSIX interrupt
1909 ** - entry is the register array entry
1910 ** - vector is the MSIX vector for this queue
1911 ** - type is RX/TX/MISC
1914 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1916 struct ixgbe_hw *hw = &adapter->hw;
/* Valid bit must accompany the vector number in the IVAR field */
1919 vector |= IXGBE_IVAR_ALLOC_VAL;
1921 if (type == -1) { /* MISC IVAR */
/* Misc IVAR masking/insertion lines are missing from this view */
1922 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1925 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1926 } else { /* RX/TX IVARS */
/* Each 32-bit IVAR register holds 4 one-byte fields: two queues
** (entry & 1 selects high/low half) x RX/TX (type 0/1) */
1927 index = (16 * (entry & 1)) + (8 * type);
1928 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1929 ivar &= ~(0xFF << index);
1930 ivar |= (vector << index);
1931 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
/* Map every queue's RX and TX causes to its MSIX vector, seed EITR
** with the default interrupt rate, then map the mailbox cause. */
1936 ixv_configure_ivars(struct adapter *adapter)
1938 struct ix_queue *que = adapter->queues;
1940 for (int i = 0; i < adapter->num_queues; i++, que++) {
1941 /* First the RX queue entry */
1942 ixv_set_ivar(adapter, i, que->msix, 0);
1943 /* ... and the TX */
1944 ixv_set_ivar(adapter, i, que->msix, 1);
1945 /* Set an initial value in EITR */
1946 IXGBE_WRITE_REG(&adapter->hw,
1947 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1950 /* For the mailbox interrupt */
/* type -1 selects the MISC IVAR; entry 1 per VF IVAR layout */
1951 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1956 ** Tasklet handler for MSIX MBX interrupts
1957 ** - do outside interrupt since it might sleep
/* Taskqueue deferral: queries link via the (possibly sleeping)
** mailbox exchange, then pushes the result to the stack. */
1960 ixv_handle_mbx(void *context, int pending)
1962 struct adapter *adapter = context;
1964 ixgbe_check_link(&adapter->hw,
1965 &adapter->link_speed, &adapter->link_up, 0);
1966 ixv_update_link_status(adapter);
1970 ** The VF stats registers never have a truely virgin
1971 ** starting point, so this routine tries to make an
1972 ** artificial one, marking ground zero on attach as
/* Accumulate the delta since the recorded base into the saved_reset_*
** counters so stats survive a reset; no-op before the first sample. */
1976 ixv_save_stats(struct adapter *adapter)
1978 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1979 adapter->stats.vf.saved_reset_vfgprc +=
1980 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1981 adapter->stats.vf.saved_reset_vfgptc +=
1982 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1983 adapter->stats.vf.saved_reset_vfgorc +=
1984 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1985 adapter->stats.vf.saved_reset_vfgotc +=
1986 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1987 adapter->stats.vf.saved_reset_vfmprc +=
1988 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
/* Snapshot the current hardware counters as both "last" (for delta
** tracking in ixv_update_stats) and "base" (ground zero on attach).
** The octet counters are 36-bit, split across LSB/MSB registers. */
1993 ixv_init_stats(struct adapter *adapter)
1995 struct ixgbe_hw *hw = &adapter->hw;
1997 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1998 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1999 adapter->stats.vf.last_vfgorc |=
2000 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2002 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2003 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2004 adapter->stats.vf.last_vfgotc |=
2005 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2007 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2009 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2010 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2011 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2012 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2013 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
/* Wraparound-aware accumulation of a 32-bit HW counter into a 64-bit
** software counter; "hw" must be in scope at the expansion site.
** Continuation lines of both macros (the low-bits merge and the
** "last = current" update, presumably) are missing from this view. */
2016 #define UPDATE_STAT_32(reg, last, count) \
2018 u32 current = IXGBE_READ_REG(hw, reg); \
2019 if (current < last) \
2020 count += 0x100000000LL; \
2022 count &= 0xFFFFFFFF00000000LL; \
/* 36-bit variant for the split LSB/MSB octet counters; wrap adds
** 2^36 and the mask keeps bits above bit 35. */
2026 #define UPDATE_STAT_36(lsb, msb, last, count) \
2028 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
2029 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
2030 u64 current = ((cur_msb << 32) | cur_lsb); \
2031 if (current < last) \
2032 count += 0x1000000000LL; \
2034 count &= 0xFFFFFFF000000000LL; \
2039 ** ixv_update_stats - Update the board statistics counters.
/* Fold the current HW counter values into the 64-bit software stats,
** handling 32-bit (packet) and 36-bit (octet) register wraparound
** via the UPDATE_STAT_* macros above. */
2042 ixv_update_stats(struct adapter *adapter)
2044 struct ixgbe_hw *hw = &adapter->hw;
2046 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2047 adapter->stats.vf.vfgprc);
2048 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2049 adapter->stats.vf.vfgptc);
2050 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2051 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2052 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2053 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2054 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2055 adapter->stats.vf.vfmprc);
2059 * Add statistic sysctls for the VF.
/* Expose driver, MAC, and queue-0 counters under the device's sysctl
** tree. Only the first queue's rings are exported here. */
2062 ixv_add_stats_sysctls(struct adapter *adapter)
2064 device_t dev = adapter->dev;
2065 struct ix_queue *que = &adapter->queues[0];
2066 struct tx_ring *txr = que->txr;
2067 struct rx_ring *rxr = que->rxr;
2069 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2070 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2071 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2072 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2074 struct sysctl_oid *stat_node, *queue_node;
2075 struct sysctl_oid_list *stat_list, *queue_list;
2077 /* Driver Statistics */
2078 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2079 CTLFLAG_RD, &adapter->dropped_pkts,
2080 "Driver dropped packets");
2081 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
2082 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
2083 "m_defrag() failed");
2084 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2085 CTLFLAG_RD, &adapter->watchdog_events,
2086 "Watchdog timeouts");
2088 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2090 "VF Statistics (read from HW registers)");
2091 stat_list = SYSCTL_CHILDREN(stat_node);
2093 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2094 CTLFLAG_RD, &stats->vfgprc,
2095 "Good Packets Received");
2096 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2097 CTLFLAG_RD, &stats->vfgorc,
2098 "Good Octets Received");
2099 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2100 CTLFLAG_RD, &stats->vfmprc,
2101 "Multicast Packets Received");
2102 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2103 CTLFLAG_RD, &stats->vfgptc,
2104 "Good Packets Transmitted");
2105 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2106 CTLFLAG_RD, &stats->vfgotc,
2107 "Good Octets Transmitted");
2109 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
2111 "Queue Statistics (collected by SW)");
2112 queue_list = SYSCTL_CHILDREN(queue_node);
2114 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2115 CTLFLAG_RD, &(que->irqs),
2117 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
2118 CTLFLAG_RD, &(rxr->rx_irq),
2119 "RX irqs on queue");
2120 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2121 CTLFLAG_RD, &(rxr->rx_packets),
2123 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2124 CTLFLAG_RD, &(rxr->rx_bytes),
2126 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2127 CTLFLAG_RD, &(rxr->rx_discarded),
2128 "Discarded RX packets");
2130 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2131 CTLFLAG_RD, &(txr->total_packets),
/* NOTE(review): tx_bytes uses SYSCTL_ADD_UINT while every sibling
** counter here uses SYSCTL_ADD_UQUAD; if txr->bytes is 64-bit this
** exports the wrong width -- confirm the field type and switch to
** SYSCTL_ADD_UQUAD */
2133 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
2134 CTLFLAG_RD, &(txr->bytes), 0,
2136 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2137 CTLFLAG_RD, &(txr->no_desc_avail),
2138 "# of times not enough descriptors were available during TX");
/* Register a read/write integer sysctl under the device tree and
** seed *limit with the given default value. */
2142 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2143 const char *description, int *limit, int value)
2146 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2147 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2148 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
2151 /**********************************************************************
2153 * This routine is called only when em_display_debug_stats is enabled.
2154 * This routine provides a way to take a look at important statistics
2155 * maintained by the driver and hardware.
2157 **********************************************************************/
2159 ixv_print_debug_info(struct adapter *adapter)
2161 device_t dev = adapter->dev;
2162 struct ixgbe_hw *hw = &adapter->hw;
2163 struct ix_queue *que = adapter->queues;
2164 struct rx_ring *rxr;
2165 struct tx_ring *txr;
2166 struct lro_ctrl *lro;
2168 device_printf(dev,"Error Byte Count = %u \n",
2169 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2171 for (int i = 0; i < adapter->num_queues; i++, que++) {
/* The assignments of txr/rxr/lro from que (lines 2172-2174,
** presumably "txr = que->txr; rxr = que->rxr; lro = &rxr->lro;")
** are missing from this view */
2175 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2176 que->msix, (long)que->irqs);
2177 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2178 rxr->me, (long long)rxr->rx_packets);
2179 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2180 rxr->me, (long)rxr->rx_bytes);
2181 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2182 rxr->me, lro->lro_queued);
2183 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2184 rxr->me, lro->lro_flushed);
2185 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2186 txr->me, (long)txr->total_packets);
2187 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2188 txr->me, (long)txr->no_desc_avail);
2191 device_printf(dev,"MBX IRQ Handled: %lu\n",
2192 (long)adapter->link_irq);
2197 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2200 struct adapter *adapter;
2203 error = sysctl_handle_int(oidp, &result, 0, req);
2205 if (error || !req->newptr)
2209 adapter = (struct adapter *) arg1;
2210 ixv_print_debug_info(adapter);