1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
45 *********************************************************************/
/* Version string reported by the ixv(4) VF driver (e.g. via sysctl/probe). */
46 char ixv_driver_version[] = "1.4.6-k";
48 /*********************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 *********************************************************************/
/* PCI ID match table for the supported 82599/X540/X550 VF functions. */
58 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
64 /* required last entry */
/* NOTE(review): the all-zero terminating entry is not visible in this excerpt — confirm it is present. */
68 /*********************************************************************
69 * Table of branding strings
70 *********************************************************************/
/* Indexed by the last field of ixv_vendor_info_array entries. */
72 static char *ixv_strings[] = {
73 "Intel(R) PRO/10GbE Virtual Function Network Driver"
76 /*********************************************************************
78 *********************************************************************/
/* Forward declarations for the ixv(4) driver entry points and helpers. */
/* Device interface (newbus) entry points: */
79 static int ixv_probe(device_t);
80 static int ixv_attach(device_t);
81 static int ixv_detach(device_t);
82 static int ixv_shutdown(device_t);
/* ifnet entry points and init/stop paths: */
83 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
84 static void ixv_init(void *);
85 static void ixv_init_locked(struct adapter *);
86 static void ixv_stop(void *);
87 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
88 static int ixv_media_change(struct ifnet *);
/* Hardware/resource setup helpers: */
89 static void ixv_identify_hardware(struct adapter *);
90 static int ixv_allocate_pci_resources(struct adapter *);
91 static int ixv_allocate_msix(struct adapter *);
92 static int ixv_setup_msix(struct adapter *);
93 static void ixv_free_pci_resources(struct adapter *);
94 static void ixv_local_timer(void *);
95 static void ixv_setup_interface(device_t, struct adapter *);
96 static void ixv_config_link(struct adapter *);
98 static void ixv_initialize_transmit_units(struct adapter *);
99 static void ixv_initialize_receive_units(struct adapter *);
101 static void ixv_enable_intr(struct adapter *);
102 static void ixv_disable_intr(struct adapter *);
103 static void ixv_set_multi(struct adapter *);
104 static void ixv_update_link_status(struct adapter *);
105 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
107 static void ixv_configure_ivars(struct adapter *);
108 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN support: */
110 static void ixv_setup_vlan_support(struct adapter *);
111 static void ixv_register_vlan(void *, struct ifnet *, u16);
112 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
/* Statistics: */
114 static void ixv_save_stats(struct adapter *);
115 static void ixv_init_stats(struct adapter *);
116 static void ixv_update_stats(struct adapter *);
117 static void ixv_add_stats_sysctls(struct adapter *);
118 static void ixv_set_sysctl_value(struct adapter *, const char *,
119 const char *, int *, int);
121 /* The MSI/X Interrupt handlers */
122 static void ixv_msix_que(void *);
123 static void ixv_msix_mbx(void *);
125 /* Deferred interrupt tasklets */
126 static void ixv_handle_que(void *, int);
127 static void ixv_handle_mbx(void *, int);
/* Netmap support — presumably inside an #ifdef DEV_NETMAP region whose
 * opening directive is not visible in this excerpt (the matching #endif
 * is below); confirm against the full file. */
131 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
134 extern void ixgbe_netmap_attach(struct adapter *adapter);
136 #include <net/netmap.h>
137 #include <sys/selinfo.h>
138 #include <dev/netmap/netmap_kern.h>
139 #endif /* DEV_NETMAP */
141 /*********************************************************************
142 * FreeBSD Device Interface Entry Points
143 *********************************************************************/
/* newbus method table wiring probe/attach/detach/shutdown to this driver. */
145 static device_method_t ixv_methods[] = {
146 /* Device interface */
147 DEVMETHOD(device_probe, ixv_probe),
148 DEVMETHOD(device_attach, ixv_attach),
149 DEVMETHOD(device_detach, ixv_detach),
150 DEVMETHOD(device_shutdown, ixv_shutdown),
/* Driver declaration: softc is a full struct adapter per device. */
154 static driver_t ixv_driver = {
155 "ixv", ixv_methods, sizeof(struct adapter),
158 devclass_t ixv_devclass;
159 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
160 MODULE_DEPEND(ixv, pci, 1, 1, 1);
161 MODULE_DEPEND(ixv, ether, 1, 1, 1);
163 MODULE_DEPEND(ix, netmap, 1, 1, 1);
164 #endif /* DEV_NETMAP */
165 /* XXX depend on 'ix' ? */
168 ** TUNEABLE PARAMETERS:
/* All of these are loader tunables under the hw.ixv.* namespace. */
171 /* Number of Queues - do not exceed MSIX vectors - 1 */
172 static int ixv_num_queues = 1;
173 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
176 ** AIM: Adaptive Interrupt Moderation
177 ** which means that the interrupt rate
178 ** is varied over time based on the
179 ** traffic for that interrupt vector
181 static int ixv_enable_aim = FALSE;
182 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
184 /* How many packets rxeof tries to clean at a time */
185 static int ixv_rx_process_limit = 256;
186 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
188 /* How many packets txeof tries to clean at a time */
189 static int ixv_tx_process_limit = 256;
190 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
192 /* Flow control setting, default to full */
193 static int ixv_flow_control = ixgbe_fc_full;
194 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
197 * Header split: this causes the hardware to DMA
198 * the header into a seperate mbuf from the payload,
199 * it can be a performance win in some workloads, but
200 * in others it actually hurts, its off by default.
202 static int ixv_header_split = FALSE;
203 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
206 ** Number of TX descriptors per ring,
207 ** setting higher than RX as this seems
208 ** the better performing choice.
210 static int ixv_txd = DEFAULT_TXD;
211 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
213 /* Number of RX descriptors per ring */
214 static int ixv_rxd = DEFAULT_RXD;
215 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
218 ** Shadow VFTA table, this is needed because
219 ** the real filter table gets cleared during
220 ** a soft reset and we need to repopulate it.
/* NOTE(review): this shadow table is a single file-scope array shared by
 * all ixv instances — looks wrong for multi-VF systems; confirm. */
222 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
224 /*********************************************************************
225 * Device identification routine
227 * ixv_probe determines if the driver should be loaded on
228 * adapter based on PCI vendor/device id of the adapter.
230 * return BUS_PROBE_DEFAULT on success, positive on failure
231 *********************************************************************/
234 ixv_probe(device_t dev)
236 ixgbe_vendor_info_t *ent;
238 u16 pci_vendor_id = 0;
239 u16 pci_device_id = 0;
240 u16 pci_subvendor_id = 0;
241 u16 pci_subdevice_id = 0;
242 char adapter_name[256];
/* Quick reject: only Intel devices can match our table. */
245 pci_vendor_id = pci_get_vendor(dev);
246 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
249 pci_device_id = pci_get_device(dev);
250 pci_subvendor_id = pci_get_subvendor(dev);
251 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; subvendor/subdevice of 0 acts as a wildcard. */
253 ent = ixv_vendor_info_array;
254 while (ent->vendor_id != 0) {
255 if ((pci_vendor_id == ent->vendor_id) &&
256 (pci_device_id == ent->device_id) &&
258 ((pci_subvendor_id == ent->subvendor_id) ||
259 (ent->subvendor_id == 0)) &&
261 ((pci_subdevice_id == ent->subdevice_id) ||
262 (ent->subdevice_id == 0))) {
263 sprintf(adapter_name, "%s, Version - %s",
264 ixv_strings[ent->index],
266 device_set_desc_copy(dev, adapter_name);
267 return (BUS_PROBE_DEFAULT);
274 /*********************************************************************
275 * Device initialization routine
277 * The attach entry point is called when the driver is being loaded.
278 * This routine identifies the type of hardware, allocates all resources
279 * and initializes the hardware.
281 * return 0 on success, positive on failure
282 *********************************************************************/
285 ixv_attach(device_t dev)
287 struct adapter *adapter;
291 INIT_DEBUGOUT("ixv_attach: begin");
293 /* Allocate, clear, and link in our adapter structure */
294 adapter = device_get_softc(dev);
299 adapter->init_locked = ixv_init_locked;
300 adapter->stop_locked = ixv_stop;
/* Core lock protects init/stop and most softc state. */
304 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
307 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
309 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
310 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
312 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
314 OID_AUTO, "enable_aim", CTLFLAG_RW,
315 &ixv_enable_aim, 1, "Interrupt Moderation");
317 /* Set up the timer callout */
318 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
320 /* Determine hardware revision */
321 ixv_identify_hardware(adapter);
323 /* Do base PCI setup - map BAR0 */
324 if (ixv_allocate_pci_resources(adapter)) {
325 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
330 /* Sysctls for limiting the amount of work done in the taskqueues */
331 ixv_set_sysctl_value(adapter, "rx_processing_limit",
332 "max number of rx packets to process",
333 &adapter->rx_process_limit, ixv_rx_process_limit);
335 ixv_set_sysctl_value(adapter, "tx_processing_limit",
336 "max number of tx packets to process",
337 &adapter->tx_process_limit, ixv_tx_process_limit);
/* NOTE(review): a second, byte-identical copy of the two sysctl
 * registrations above (rx/tx_processing_limit) was removed here;
 * registering the same OIDs twice is redundant and triggers
 * duplicate-sysctl warnings. */
348 /* Do descriptor calc and sanity checks */
349 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
350 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
351 device_printf(dev, "TXD config issue, using default!\n");
352 adapter->num_tx_desc = DEFAULT_TXD;
354 adapter->num_tx_desc = ixv_txd;
356 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
357 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
358 device_printf(dev, "RXD config issue, using default!\n");
359 adapter->num_rx_desc = DEFAULT_RXD;
361 adapter->num_rx_desc = ixv_rxd;
363 /* Allocate our TX/RX Queues */
364 if (ixgbe_allocate_queues(adapter)) {
365 device_printf(dev, "ixgbe_allocate_queues() failed!\n");
371 ** Initialize the shared code: its
372 ** at this point the mac type is set.
374 error = ixgbe_init_shared_code(hw);
376 device_printf(dev, "ixgbe_init_shared_code() failed!\n");
381 /* Setup the mailbox */
382 ixgbe_init_mbx_params_vf(hw);
384 /* Reset mbox api to 1.0 */
385 error = ixgbe_reset_hw(hw);
386 if (error == IXGBE_ERR_RESET_FAILED)
387 device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
389 device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
395 /* Negotiate mailbox API version */
396 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
398 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
403 error = ixgbe_init_hw(hw);
405 device_printf(dev, "ixgbe_init_hw() failed!\n");
410 error = ixv_allocate_msix(adapter);
412 device_printf(dev, "ixv_allocate_msix() failed!\n");
416 /* If no mac address was assigned, make a random one */
417 if (!ixv_check_ether_addr(hw->mac.addr)) {
418 u8 addr[ETHER_ADDR_LEN];
419 arc4rand(&addr, sizeof(addr), 0);
422 bcopy(addr, hw->mac.addr, sizeof(addr));
425 /* Setup OS specific network interface */
426 ixv_setup_interface(dev, adapter);
428 /* Do the stats setup */
429 ixv_save_stats(adapter);
430 ixv_init_stats(adapter);
431 ixv_add_stats_sysctls(adapter);
433 /* Register for VLAN events */
434 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
435 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
436 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
437 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
440 ixgbe_netmap_attach(adapter);
441 #endif /* DEV_NETMAP */
442 INIT_DEBUGOUT("ixv_attach: end");
/* Error-unwind path: release in reverse order of acquisition. */
446 ixgbe_free_transmit_structures(adapter);
447 ixgbe_free_receive_structures(adapter);
449 ixv_free_pci_resources(adapter);
454 /*********************************************************************
455 * Device removal routine
457 * The detach entry point is called when the driver is being removed.
458 * This routine stops the adapter and deallocates all the resources
459 * that were allocated for driver operation.
461 * return 0 on success, positive on failure
462 *********************************************************************/
465 ixv_detach(device_t dev)
467 struct adapter *adapter = device_get_softc(dev);
468 struct ix_queue *que = adapter->queues;
470 INIT_DEBUGOUT("ixv_detach: begin");
472 /* Make sure VLANS are not using driver */
473 if (adapter->ifp->if_vlantrunk != NULL) {
474 device_printf(dev, "Vlan in use, detach first\n");
/* Stop the adapter under the core lock before tearing anything down. */
478 IXGBE_CORE_LOCK(adapter);
480 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues. */
482 for (int i = 0; i < adapter->num_queues; i++, que++) {
484 struct tx_ring *txr = que->txr;
485 taskqueue_drain(que->tq, &txr->txq_task);
486 taskqueue_drain(que->tq, &que->que_task);
487 taskqueue_free(que->tq);
491 /* Drain the Mailbox(link) queue */
493 taskqueue_drain(adapter->tq, &adapter->link_task);
494 taskqueue_free(adapter->tq);
497 /* Unregister VLAN events */
498 if (adapter->vlan_attach != NULL)
499 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
500 if (adapter->vlan_detach != NULL)
501 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
503 ether_ifdetach(adapter->ifp);
504 callout_drain(&adapter->timer);
506 netmap_detach(adapter->ifp);
507 #endif /* DEV_NETMAP */
508 ixv_free_pci_resources(adapter);
509 bus_generic_detach(dev);
510 if_free(adapter->ifp);
512 ixgbe_free_transmit_structures(adapter);
513 ixgbe_free_receive_structures(adapter);
515 IXGBE_CORE_LOCK_DESTROY(adapter);
519 /*********************************************************************
521 * Shutdown entry point
523 **********************************************************************/
/* System-shutdown hook: stop the adapter under the core lock. */
525 ixv_shutdown(device_t dev)
527 struct adapter *adapter = device_get_softc(dev);
528 IXGBE_CORE_LOCK(adapter);
530 IXGBE_CORE_UNLOCK(adapter);
535 /*********************************************************************
538 * ixv_ioctl is called when the user wants to configure the
541 * return 0 on success, positive on failure
542 **********************************************************************/
/* ifnet ioctl entry point; the switch/case labels are not visible in this
 * excerpt — each section below is identified by its IOCTL_DEBUGOUT string. */
545 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
547 struct adapter *adapter = ifp->if_softc;
548 struct ifreq *ifr = (struct ifreq *) data;
549 #if defined(INET) || defined(INET6)
550 struct ifaddr *ifa = (struct ifaddr *) data;
551 bool avoid_reset = FALSE;
/* Address-family check for the SIOCSIFADDR-style path below. */
559 if (ifa->ifa_addr->sa_family == AF_INET)
563 if (ifa->ifa_addr->sa_family == AF_INET6)
566 #if defined(INET) || defined(INET6)
568 ** Calling init results in link renegotiation,
569 ** so we avoid doing it when possible.
572 ifp->if_flags |= IFF_UP;
573 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
575 if (!(ifp->if_flags & IFF_NOARP))
576 arp_ifinit(ifp, ifa);
578 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the max frame size, then re-init if up. */
582 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
583 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
586 IXGBE_CORE_LOCK(adapter);
587 ifp->if_mtu = ifr->ifr_mtu;
588 adapter->max_frame_size =
589 ifp->if_mtu + IXGBE_MTU_HDR;
590 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
591 ixv_init_locked(adapter);
592 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: bring the interface up or down to match IFF_UP. */
596 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
597 IXGBE_CORE_LOCK(adapter);
598 if (ifp->if_flags & IFF_UP) {
599 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
600 ixv_init_locked(adapter);
602 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
604 adapter->if_flags = ifp->if_flags;
605 IXGBE_CORE_UNLOCK(adapter);
/* SIOCADDMULTI/SIOCDELMULTI: reload the multicast filter with intr masked. */
609 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
610 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
611 IXGBE_CORE_LOCK(adapter);
612 ixv_disable_intr(adapter);
613 ixv_set_multi(adapter);
614 ixv_enable_intr(adapter);
615 IXGBE_CORE_UNLOCK(adapter);
/* SIOCGIFMEDIA/SIOCSIFMEDIA: delegate to ifmedia. */
620 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
621 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle offload capabilities, re-init if running. */
625 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
626 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
627 if (mask & IFCAP_HWCSUM)
628 ifp->if_capenable ^= IFCAP_HWCSUM;
629 if (mask & IFCAP_TSO4)
630 ifp->if_capenable ^= IFCAP_TSO4;
631 if (mask & IFCAP_LRO)
632 ifp->if_capenable ^= IFCAP_LRO;
633 if (mask & IFCAP_VLAN_HWTAGGING)
634 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
635 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
636 IXGBE_CORE_LOCK(adapter);
637 ixv_init_locked(adapter);
638 IXGBE_CORE_UNLOCK(adapter);
640 VLAN_CAPABILITIES(ifp);
/* default: hand anything else to the generic ethernet ioctl handler. */
645 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
646 error = ether_ioctl(ifp, command, data);
653 /*********************************************************************
656 * This routine is used in two ways. It is used by the stack as
657 * init entry point in network interface structure. It is also used
658 * by the driver as a hw/sw initialization routine to get to a
661 * return 0 on success, positive on failure
662 **********************************************************************/
663 #define IXGBE_MHADD_MFS_SHIFT 16
/* Full (re)initialization; caller must hold the core lock (asserted below). */
666 ixv_init_locked(struct adapter *adapter)
668 struct ifnet *ifp = adapter->ifp;
669 device_t dev = adapter->dev;
670 struct ixgbe_hw *hw = &adapter->hw;
673 INIT_DEBUGOUT("ixv_init_locked: begin");
674 mtx_assert(&adapter->core_mtx, MA_OWNED);
675 hw->adapter_stopped = FALSE;
676 ixgbe_stop_adapter(hw);
677 callout_stop(&adapter->timer);
679 /* reprogram the RAR[0] in case user changed it. */
680 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
682 /* Get the latest mac address, User can use a LAA */
683 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
684 IXGBE_ETH_LENGTH_OF_ADDRESS);
685 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
686 hw->addr_ctrl.rar_used_count = 1;
688 /* Prepare transmit descriptors and buffers */
689 if (ixgbe_setup_transmit_structures(adapter)) {
690 device_printf(dev, "Could not setup transmit structures\n");
695 /* Reset VF and renegotiate mailbox API version */
697 error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
699 device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
701 ixv_initialize_transmit_units(adapter);
703 /* Setup Multicast table */
704 ixv_set_multi(adapter);
707 ** Determine the correct mbuf pool
708 ** for doing jumbo/headersplit
710 if (ifp->if_mtu > ETHERMTU)
711 adapter->rx_mbuf_sz = MJUMPAGESIZE;
713 adapter->rx_mbuf_sz = MCLBYTES;
715 /* Prepare receive descriptors and buffers */
716 if (ixgbe_setup_receive_structures(adapter)) {
717 device_printf(dev, "Could not setup receive structures\n");
722 /* Configure RX settings */
723 ixv_initialize_receive_units(adapter);
725 /* Set the various hardware offload abilities */
726 ifp->if_hwassist = 0;
727 if (ifp->if_capenable & IFCAP_TSO4)
728 ifp->if_hwassist |= CSUM_TSO;
729 if (ifp->if_capenable & IFCAP_TXCSUM) {
730 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
731 #if __FreeBSD_version >= 800000
732 ifp->if_hwassist |= CSUM_SCTP;
736 /* Set up VLAN offload and filter */
737 ixv_setup_vlan_support(adapter);
739 /* Set up MSI/X routing */
740 ixv_configure_ivars(adapter);
742 /* Set up auto-mask */
743 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
745 /* Set moderation on the Link interrupt */
746 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
749 ixv_init_stats(adapter);
751 /* Config/Enable Link */
752 ixv_config_link(adapter);
/* Start the periodic watchdog/stats timer (1 Hz). */
755 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
757 /* And now turn on interrupts */
758 ixv_enable_intr(adapter);
760 /* Now inform the stack we're ready */
761 ifp->if_drv_flags |= IFF_DRV_RUNNING;
762 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* ixv_init: unlocked wrapper used as the stack's if_init entry point
 * (its signature line is not visible in this excerpt). */
770 struct adapter *adapter = arg;
772 IXGBE_CORE_LOCK(adapter);
773 ixv_init_locked(adapter);
774 IXGBE_CORE_UNLOCK(adapter);
781 ** MSIX Interrupt Handlers and Tasklets
/* Unmask the MSI-X interrupt for one queue vector via VTEIMS. */
786 ixv_enable_queue(struct adapter *adapter, u32 vector)
788 struct ixgbe_hw *hw = &adapter->hw;
789 u32 queue = 1 << vector;
792 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
793 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Mask (disable) the MSI-X interrupt for one queue vector via VTEIMC. */
797 ixv_disable_queue(struct adapter *adapter, u32 vector)
799 struct ixgbe_hw *hw = &adapter->hw;
/* Shift in 64-bit arithmetic: the original "(u64)(1 << vector)" performed
 * an int (32-bit) shift before the cast, which is undefined behavior for
 * vector >= 31 (C11 6.5.7). */
800 u64 queue = (u64)1 << vector;
803 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
804 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
/* Force a software interrupt (VTEICS) on the queues in the given bitmap,
 * used by the watchdog to kick queues that still have work pending. */
808 ixv_rearm_queues(struct adapter *adapter, u64 queues)
810 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
811 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
/* Deferred (taskqueue) per-queue service routine: clean RX, restart TX,
 * and either requeue itself (more work) or re-enable the queue interrupt. */
816 ixv_handle_que(void *context, int pending)
818 struct ix_queue *que = context;
819 struct adapter *adapter = que->adapter;
820 struct tx_ring *txr = que->txr;
821 struct ifnet *ifp = adapter->ifp;
824 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
825 more = ixgbe_rxeof(que);
/* Kick the transmit path: multiqueue (drbr) on newer FreeBSD, legacy if_snd otherwise. */
828 #if __FreeBSD_version >= 800000
829 if (!drbr_empty(ifp, txr->br))
830 ixgbe_mq_start_locked(ifp, txr);
832 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
833 ixgbe_start_locked(txr, ifp);
835 IXGBE_TX_UNLOCK(txr);
/* More RX work remains: reschedule ourselves instead of unmasking. */
837 taskqueue_enqueue(que->tq, &que->que_task);
842 /* Reenable this interrupt */
843 ixv_enable_queue(adapter, que->msix);
847 /*********************************************************************
849 * MSI Queue Interrupt Service routine
851 **********************************************************************/
/* MSI-X interrupt handler for one RX/TX queue pair: masks the vector,
 * services the rings, then applies adaptive interrupt moderation (AIM). */
853 ixv_msix_que(void *arg)
855 struct ix_queue *que = arg;
856 struct adapter *adapter = que->adapter;
857 struct ifnet *ifp = adapter->ifp;
858 struct tx_ring *txr = que->txr;
859 struct rx_ring *rxr = que->rxr;
/* Mask this vector until servicing completes. */
863 ixv_disable_queue(adapter, que->msix);
866 more = ixgbe_rxeof(que);
871 ** Make certain that if the stack
872 ** has anything queued the task gets
873 ** scheduled to handle it.
875 #ifdef IXGBE_LEGACY_TX
876 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
877 ixgbe_start_locked(txr, ifp);
879 if (!drbr_empty(adapter->ifp, txr->br))
880 ixgbe_mq_start_locked(ifp, txr);
882 IXGBE_TX_UNLOCK(txr);
/* AIM disabled: skip straight to the reschedule/re-enable decision. */
886 if (ixv_enable_aim == FALSE)
889 ** Do Adaptive Interrupt Moderation:
890 ** - Write out last calculated setting
891 ** - Calculate based on average size over
892 ** the last interval.
894 if (que->eitr_setting)
895 IXGBE_WRITE_REG(&adapter->hw,
896 IXGBE_VTEITR(que->msix),
899 que->eitr_setting = 0;
901 /* Idle, do nothing */
902 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* Estimate the new ITR from average packet size this interval. */
905 if ((txr->bytes) && (txr->packets))
906 newitr = txr->bytes/txr->packets;
907 if ((rxr->bytes) && (rxr->packets))
909 (rxr->bytes / rxr->packets));
910 newitr += 24; /* account for hardware frame, crc */
912 /* set an upper boundary */
913 newitr = min(newitr, 3000);
915 /* Be nice to the mid range */
916 if ((newitr > 300) && (newitr < 1200))
917 newitr = (newitr / 3);
919 newitr = (newitr / 2);
/* Duplicate into the high half-word as the VTEITR register layout expects. */
921 newitr |= newitr << 16;
923 /* save for next interrupt */
924 que->eitr_setting = newitr;
/* If RX still has work, defer to the taskqueue; otherwise unmask now. */
934 taskqueue_enqueue(que->tq, &que->que_task);
935 else /* Reenable this interrupt */
936 ixv_enable_queue(adapter, que->msix);
/* MSI-X handler for the mailbox/link vector: reads and clears the cause,
 * schedules the deferred link task on LSC, then re-unmasks OTHER. */
941 ixv_msix_mbx(void *arg)
943 struct adapter *adapter = arg;
944 struct ixgbe_hw *hw = &adapter->hw;
949 /* First get the cause */
950 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
951 /* Clear interrupt with write */
952 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
954 /* Link status change */
955 if (reg & IXGBE_EICR_LSC)
956 taskqueue_enqueue(adapter->tq, &adapter->link_task);
958 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
962 /*********************************************************************
964 * Media Ioctl callback
966 * This routine is called whenever the user queries the status of
967 * the interface using ifconfig.
969 **********************************************************************/
971 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
973 struct adapter *adapter = ifp->if_softc;
975 INIT_DEBUGOUT("ixv_media_status: begin");
976 IXGBE_CORE_LOCK(adapter);
977 ixv_update_link_status(adapter);
979 ifmr->ifm_status = IFM_AVALID;
980 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and bail out early. */
982 if (!adapter->link_active) {
983 IXGBE_CORE_UNLOCK(adapter);
987 ifmr->ifm_status |= IFM_ACTIVE;
/* Translate hardware link speed into ifmedia bits. */
989 switch (adapter->link_speed) {
990 case IXGBE_LINK_SPEED_1GB_FULL:
991 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
993 case IXGBE_LINK_SPEED_10GB_FULL:
994 ifmr->ifm_active |= IFM_FDX;
998 IXGBE_CORE_UNLOCK(adapter);
1003 /*********************************************************************
1005 * Media Ioctl callback
1007 * This routine is called when the user changes speed/duplex using
1008 * media/mediopt option with ifconfig.
1010 **********************************************************************/
/* Only IFM_AUTO is accepted — a VF cannot set link speed itself. */
1012 ixv_media_change(struct ifnet * ifp)
1014 struct adapter *adapter = ifp->if_softc;
1015 struct ifmedia *ifm = &adapter->media;
1017 INIT_DEBUGOUT("ixv_media_change: begin");
1019 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1022 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1026 device_printf(adapter->dev, "Only auto media type\n");
1034 /*********************************************************************
1037 * This routine is called whenever multicast address list is updated.
1039 **********************************************************************/
1040 #define IXGBE_RAR_ENTRIES 16
/* Gather the interface's link-layer multicast addresses into a flat
 * array and hand it to the shared code via the iterator callback. */
1043 ixv_set_multi(struct adapter *adapter)
1045 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1047 struct ifmultiaddr *ifma;
1049 struct ifnet *ifp = adapter->ifp;
1051 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1053 #if __FreeBSD_version < 800000
1056 if_maddr_rlock(ifp);
1058 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1059 if (ifma->ifma_addr->sa_family != AF_LINK)
1061 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1062 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1063 IXGBE_ETH_LENGTH_OF_ADDRESS);
1066 #if __FreeBSD_version < 800000
1067 IF_ADDR_UNLOCK(ifp);
1069 if_maddr_runlock(ifp);
1074 ixgbe_update_mc_addr_list(&adapter->hw,
1075 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1081 * This is an iterator function now needed by the multicast
1082 * shared code. It simply feeds the shared code routine the
1083 * addresses in the array of ixv_set_multi() one by one.
/* Advances *update_ptr by one MAC address per call. */
1086 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1088 u8 *addr = *update_ptr;
1092 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1093 *update_ptr = newptr;
1097 /*********************************************************************
1100 * This routine checks for link status,updates statistics,
1101 * and runs the watchdog check.
1103 **********************************************************************/
/* 1 Hz callout (armed by ixv_init_locked); runs with the core lock held. */
1106 ixv_local_timer(void *arg)
1108 struct adapter *adapter = arg;
1109 device_t dev = adapter->dev;
1110 struct ix_queue *que = adapter->queues;
1114 mtx_assert(&adapter->core_mtx, MA_OWNED);
1116 ixv_update_link_status(adapter);
1119 ixv_update_stats(adapter);
1122 ** Check the TX queues status
1123 ** - mark hung queues so we don't schedule on them
1124 ** - watchdog only if all queues show hung
1126 for (int i = 0; i < adapter->num_queues; i++, que++) {
1127 /* Keep track of queues with work for soft irq */
1129 queues |= ((u64)1 << que->me);
1131 ** Each time txeof runs without cleaning, but there
1132 ** are uncleaned descriptors it increments busy. If
1133 ** we get to the MAX we declare it hung.
1135 if (que->busy == IXGBE_QUEUE_HUNG) {
1137 /* Mark the queue as inactive */
1138 adapter->active_queues &= ~((u64)1 << que->me);
1141 /* Check if we've come back from hung */
1142 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1143 adapter->active_queues |= ((u64)1 << que->me);
1145 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1146 device_printf(dev,"Warning queue %d "
1147 "appears to be hung!\n", i);
1148 que->txr->busy = IXGBE_QUEUE_HUNG;
1154 /* Only truely watchdog if all queues show hung */
1155 if (hung == adapter->num_queues)
1157 else if (queues != 0) { /* Force an IRQ on queues with work */
1158 ixv_rearm_queues(adapter, queues);
1161 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog path: all queues hung — mark down, count it, and re-init. */
1165 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1166 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1167 adapter->watchdog_events++;
1168 ixv_init_locked(adapter);
1172 ** Note: this routine updates the OS on the link state
1173 ** the real check of the hardware only happens with
1174 ** a link interrupt.
/* Propagates cached adapter->link_up/link_speed into ifnet link state,
 * logging only on an actual transition. */
1177 ixv_update_link_status(struct adapter *adapter)
1179 struct ifnet *ifp = adapter->ifp;
1180 device_t dev = adapter->dev;
1182 if (adapter->link_up){
1183 if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10 Gbps encoding here; anything else prints 1. */
1185 device_printf(dev,"Link is up %d Gbps %s \n",
1186 ((adapter->link_speed == 128)? 10:1),
1188 adapter->link_active = TRUE;
1189 if_link_state_change(ifp, LINK_STATE_UP);
1191 } else { /* Link down */
1192 if (adapter->link_active == TRUE) {
1194 device_printf(dev,"Link is Down\n");
1195 if_link_state_change(ifp, LINK_STATE_DOWN);
1196 adapter->link_active = FALSE;
1204 /*********************************************************************
1206 * This routine disables all traffic on the adapter by issuing a
1207 * global reset on the MAC and deallocates TX/RX buffers.
1209 **********************************************************************/
/* ixv_stop: caller must hold the core lock (asserted below); its
 * signature line is not visible in this excerpt. */
1215 struct adapter *adapter = arg;
1216 struct ixgbe_hw *hw = &adapter->hw;
1219 mtx_assert(&adapter->core_mtx, MA_OWNED);
1221 INIT_DEBUGOUT("ixv_stop: begin\n");
1222 ixv_disable_intr(adapter);
1224 /* Tell the stack that the interface is no longer active */
1225 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1228 adapter->hw.adapter_stopped = FALSE;
1229 ixgbe_stop_adapter(hw);
1230 callout_stop(&adapter->timer);
1232 /* reprogram the RAR[0] in case user changed it. */
1233 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1239 /*********************************************************************
1241 * Determine hardware revision.
1243 **********************************************************************/
/* Copies PCI config identifiers into the shared-code hw struct and
 * derives the MAC type from them. */
1245 ixv_identify_hardware(struct adapter *adapter)
1247 device_t dev = adapter->dev;
1248 struct ixgbe_hw *hw = &adapter->hw;
1251 ** Make sure BUSMASTER is set, on a VM under
1252 ** KVM it may not be and will break things.
1254 pci_enable_busmaster(dev);
1256 /* Save off the information about this board */
1257 hw->vendor_id = pci_get_vendor(dev);
1258 hw->device_id = pci_get_device(dev);
1259 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1260 hw->subsystem_vendor_id =
1261 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1262 hw->subsystem_device_id =
1263 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1265 /* We need this to determine device-specific things */
1266 ixgbe_set_mac_type(hw);
1268 /* Set the right number of segments */
1269 adapter->num_segs = IXGBE_82599_SCATTER;
/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers.
 *
 *  Allocates one IRQ + handler + taskqueue per RX/TX queue pair,
 *  then one more vector for the mailbox (link) interrupt.
 *
 **********************************************************************/
ixv_allocate_msix(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int error, rid, vector = 0;

	/* One pass per queue: IRQ resource, handler, tasks, taskqueue. */
	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev,"Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
			device_printf(dev, "Failed to register QUE handler");
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
		/*
		** NOTE(review): (u64)(1 << que->msix) performs the shift at
		** int width and only then widens — undefined/overflowing for
		** msix >= 31.  Should be ((u64)1 << que->msix).
		*/
		adapter->active_queues |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));

	/* Now the mailbox (link/other) interrupt. */
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev,"Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
	adapter->vector = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
/*
** Setup MSIX resources, note that the VF
** device MUST use MSIX, there is no fallback.
**
** Maps the MSIX table BAR, asks for num_queues + 1 vectors
** (one extra for the mailbox), and releases everything on failure.
*/
ixv_setup_msix(struct adapter *adapter)
	device_t dev = adapter->dev;
	int rid, want, msgs;

	/* Must have at least 2 MSIX vectors */
	msgs = pci_msix_count(dev);

	/* Map the MSIX table BAR. */
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
	/*
	** Want vectors for the queues,
	** plus an additional for mailbox.
	*/
	want = adapter->num_queues + 1;
		/* Not enough vectors: shrink the queue count to fit. */
		adapter->num_queues = msgs - 1;
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);

	/* Release in case alloc was insufficient */
	pci_release_msi(dev);
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	device_printf(adapter->dev,"MSIX config error\n");
/*
** ixv_allocate_pci_resources - map the register BAR and set up MSIX.
*/
ixv_allocate_pci_resources(struct adapter *adapter)
	device_t dev = adapter->dev;

	/* Map the device register space (BAR 0). */
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");

	/* Record the bus tag/handle so register macros can reach HW. */
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;
	adapter->hw.back = adapter;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
/*
** ixv_free_pci_resources - release IRQs, MSIX table and register BAR.
**
** Safe to call from a partially-failed attach; see the early-out
** comment below.
*/
ixv_free_pci_resources(struct adapter * adapter)
	struct ix_queue *que = adapter->queues;
	device_t dev = adapter->dev;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

	/* Release any MSI/MSIX vectors, then the mapped BARs. */
	pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 *  Allocates the ifnet, wires up the driver entry points, attaches
 *  the Ethernet layer, advertises capabilities, and registers the
 *  (autoselect-only) media types.
 *
 **********************************************************************/
ixv_setup_interface(device_t dev, struct adapter *adapter)
	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit on newer stacks, legacy if_start otherwise. */
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
	ifp->if_start = ixgbe_start;
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Advertise offload capabilities and enable them all by default. */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1582 ixv_config_link(struct adapter *adapter)
1584 struct ixgbe_hw *hw = &adapter->hw;
1587 if (hw->mac.ops.check_link)
1588 hw->mac.ops.check_link(hw, &autoneg,
1589 &adapter->link_up, FALSE);
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs each VF TX queue: writeback threshold, head/tail,
 *  descriptor ring base/length, relaxed-ordering, then enable.
 *
 **********************************************************************/
ixv_initialize_transmit_units(struct adapter *adapter)
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable TX descriptor write-relaxed-ordering. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Finally, enable the queue. */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 *  For each VF RX queue: disable, program descriptor ring base /
 *  length / indices, configure SRRCTL buffer sizing, re-enable,
 *  then set the tail pointer; finally program RXCSUM.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

ixv_initialize_receive_units(struct adapter *adapter)
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;

	/* Pick the RX buffer size from the MTU (value is in 1KB units). */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (bounded) until the enable bit reads back clear. */
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture  Rx Tail register */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll (bounded) until the enable bit reads back set. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* If RSS hash storage isn't on, enable IP payload checksum. */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
/*
** ixv_setup_vlan_support - restore VLAN state after a soft reset.
**
** Re-enables VLAN stripping on every RX queue and replays the
** software shadow VFTA into the hardware via the PF mailbox.
*/
ixv_setup_vlan_support(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;
	struct		rx_ring	*rxr;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr = &adapter->rx_rings[i];
		rxr->vtag_strip = TRUE;

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			if ((vfta & (1 << j)) == 0)
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
/*
** This routine is run via an vlan config EVENT,
** it enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA, init will
** repopulate the real table.
*/
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
	struct adapter	*adapter = ifp->if_softc;

	if (ifp->if_softc != arg) /* Not our event */

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */

	IXGBE_CORE_LOCK(adapter);
	/* VFTA is an array of 32-bit words: word index = vtag / 32. */
	index = (vtag >> 5) & 0x7F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
/*
** This routine is run via an vlan
** unconfig EVENT, remove our entry
** in the soft vfta.
*/
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
	struct adapter	*adapter = ifp->if_softc;

	if (ifp->if_softc != arg)	/* Not our event */

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */

	IXGBE_CORE_LOCK(adapter);
	/* Clear the tag's bit in the shadow VFTA word. */
	index = (vtag >> 5) & 0x7F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
1878 ixv_enable_intr(struct adapter *adapter)
1880 struct ixgbe_hw *hw = &adapter->hw;
1881 struct ix_queue *que = adapter->queues;
1882 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1885 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1887 mask = IXGBE_EIMS_ENABLE_MASK;
1888 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1889 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1891 for (int i = 0; i < adapter->num_queues; i++, que++)
1892 ixv_enable_queue(adapter, que->msix);
1894 IXGBE_WRITE_FLUSH(hw);
1900 ixv_disable_intr(struct adapter *adapter)
1902 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1903 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1904 IXGBE_WRITE_FLUSH(&adapter->hw);
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC (0 = RX, 1 = TX, -1 = MISC/mailbox)
*/
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
	struct ixgbe_hw *hw = &adapter->hw;

	/* The valid bit must accompany the vector number. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		/* Each 32-bit IVAR packs 4 bytes: 2 entries x RX/TX. */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1937 ixv_configure_ivars(struct adapter *adapter)
1939 struct ix_queue *que = adapter->queues;
1941 for (int i = 0; i < adapter->num_queues; i++, que++) {
1942 /* First the RX queue entry */
1943 ixv_set_ivar(adapter, i, que->msix, 0);
1944 /* ... and the TX */
1945 ixv_set_ivar(adapter, i, que->msix, 1);
1946 /* Set an initial value in EITR */
1947 IXGBE_WRITE_REG(&adapter->hw,
1948 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1951 /* For the mailbox interrupt */
1952 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1957 ** Tasklet handler for MSIX MBX interrupts
1958 ** - do outside interrupt since it might sleep
1961 ixv_handle_mbx(void *context, int pending)
1963 struct adapter *adapter = context;
1965 ixgbe_check_link(&adapter->hw,
1966 &adapter->link_speed, &adapter->link_up, 0);
1967 ixv_update_link_status(adapter);
1971 ** The VF stats registers never have a truely virgin
1972 ** starting point, so this routine tries to make an
1973 ** artificial one, marking ground zero on attach as
1977 ixv_save_stats(struct adapter *adapter)
1979 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1980 adapter->stats.vf.saved_reset_vfgprc +=
1981 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1982 adapter->stats.vf.saved_reset_vfgptc +=
1983 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1984 adapter->stats.vf.saved_reset_vfgorc +=
1985 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1986 adapter->stats.vf.saved_reset_vfgotc +=
1987 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1988 adapter->stats.vf.saved_reset_vfmprc +=
1989 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1994 ixv_init_stats(struct adapter *adapter)
1996 struct ixgbe_hw *hw = &adapter->hw;
1998 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1999 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2000 adapter->stats.vf.last_vfgorc |=
2001 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2003 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2004 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2005 adapter->stats.vf.last_vfgotc |=
2006 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2008 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2010 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2011 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2012 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2013 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2014 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
/*
** UPDATE_STAT_32 / UPDATE_STAT_36 - software-extend rollover HW
** counters into 64-bit accumulators.  'last' holds the previous raw
** register read; when the new read is smaller the counter wrapped,
** so a full period (2^32 or 2^36) is added to 'count'.  Both macros
** rely on a local 'hw' pointer being in scope at the expansion site.
*/
#define UPDATE_STAT_32(reg, last, count)		\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	count &= 0xFFFFFFFF00000000LL;			\
#define UPDATE_STAT_36(lsb, msb, last, count) 		\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	count &= 0xFFFFFFF000000000LL;			\
/*
** ixv_update_stats - Update the board statistics counters.
**
** Rolls the live VF counter registers into the 64-bit software
** accumulators via the UPDATE_STAT_* macros above.
*/
ixv_update_stats(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;

	/* 32-bit packet counters: good RX, good TX, multicast RX. */
	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	/* 36-bit octet counters: good RX/TX byte counts. */
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
/*
 * Add statistic sysctls for the VF.
 *
 * Creates driver-level counters, a "mac" subtree for the HW
 * register statistics, and a "que" subtree for the per-queue
 * software counters (queue 0 only).
 */
ixv_add_stats_sysctls(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts,
	    "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
	    "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events,
	    "Watchdog timeouts");

	/* Hardware-register statistics under dev.ixv.N.mac. */
	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc,
	    "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc,
	    "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc,
	    "Good Octets Transmitted");

	/* Per-queue software statistics under dev.ixv.N.que. */
	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
	    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
	    CTLFLAG_RD, &(que->irqs),
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
	    CTLFLAG_RD, &(rxr->rx_irq),
	    "RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
	    CTLFLAG_RD, &(rxr->rx_packets),
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
	    CTLFLAG_RD, &(rxr->rx_bytes),
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
	    CTLFLAG_RD, &(rxr->rx_discarded),
	    "Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
	    CTLFLAG_RD, &(txr->total_packets),
	/*
	** NOTE(review): tx_bytes uses SYSCTL_ADD_UINT while the
	** surrounding stats use SYSCTL_ADD_UQUAD — confirm that
	** txr->bytes really is a 32-bit field, otherwise this
	** exports the wrong width.
	*/
	SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
	    CTLFLAG_RD, &(txr->bytes), 0,
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
	    CTLFLAG_RD, &(txr->no_desc_avail),
	    "# of times not enough descriptors were available during TX");
2143 ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2144 const char *description, int *limit, int value)
2147 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2148 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2149 OID_AUTO, name, CTLFLAG_RW, limit, value, description);
/**********************************************************************
 *
 *  This routine is called only when em_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 *  Dumps per-queue IRQ/RX/TX/LRO counters and the mailbox IRQ count
 *  to the console.
 *
 **********************************************************************/
ixv_print_debug_info(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ixgbe_hw         *hw = &adapter->hw;
	struct ix_queue         *que = adapter->queues;
	struct rx_ring          *rxr;
	struct tx_ring          *txr;
	struct lro_ctrl         *lro;

	device_printf(dev,"Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (long)que->irqs);
		device_printf(dev,"RX(%d) Packets Received: %lld\n",
		    rxr->me, (long long)rxr->rx_packets);
		device_printf(dev,"RX(%d) Bytes Received: %lu\n",
		    rxr->me, (long)rxr->rx_bytes);
		device_printf(dev,"RX(%d) LRO Queued= %d\n",
		    rxr->me, lro->lro_queued);
		device_printf(dev,"RX(%d) LRO Flushed= %d\n",
		    rxr->me, lro->lro_flushed);
		device_printf(dev,"TX(%d) Packets Sent: %lu\n",
		    txr->me, (long)txr->total_packets);
		device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (long)txr->no_desc_avail);

	device_printf(dev,"MBX IRQ Handled: %lu\n",
	    (long)adapter->link_irq);
2198 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2201 struct adapter *adapter;
2204 error = sysctl_handle_int(oidp, &result, 0, req);
2206 if (error || !req->newptr)
2210 adapter = (struct adapter *) arg1;
2211 ixv_print_debug_info(adapter);