1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
45 *********************************************************************/
/* Driver version string reported to users (probe message / sysctl). */
46 char ixv_driver_version[] = "1.2.5";
48 /*********************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 *********************************************************************/
/* NOTE(review): this is a line-dropped extraction — the all-zeros
 * terminator entry and the closing brace of the table are not visible
 * here, so the table as shown is incomplete. */
58 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
64 /* required last entry */
68 /*********************************************************************
69 * Table of branding strings
70 *********************************************************************/
/* Index 0 corresponds to the 'String Index' field (all zero) in
 * ixv_vendor_info_array above; used by ixv_probe to build the
 * device description. */
72 static char *ixv_strings[] = {
73 "Intel(R) PRO/10GbE Virtual Function Network Driver"
76 /*********************************************************************
78 *********************************************************************/
/* Forward declarations for all file-local (static) routines:
 * bus/newbus entry points, ifnet callbacks, hardware bring-up helpers,
 * stats plumbing, and the MSI-X interrupt/tasklet handlers. */
79 static int ixv_probe(device_t);
80 static int ixv_attach(device_t);
81 static int ixv_detach(device_t);
82 static int ixv_shutdown(device_t);
83 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
84 static void ixv_init(void *);
85 static void ixv_init_locked(struct adapter *);
86 static void ixv_stop(void *);
87 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
88 static int ixv_media_change(struct ifnet *);
89 static void ixv_identify_hardware(struct adapter *);
90 static int ixv_allocate_pci_resources(struct adapter *);
91 static int ixv_allocate_msix(struct adapter *);
92 static int ixv_setup_msix(struct adapter *);
93 static void ixv_free_pci_resources(struct adapter *);
94 static void ixv_local_timer(void *);
95 static void ixv_setup_interface(device_t, struct adapter *);
96 static void ixv_config_link(struct adapter *);
98 static void ixv_initialize_transmit_units(struct adapter *);
99 static void ixv_initialize_receive_units(struct adapter *);
101 static void ixv_enable_intr(struct adapter *);
102 static void ixv_disable_intr(struct adapter *);
103 static void ixv_set_multi(struct adapter *);
104 static void ixv_update_link_status(struct adapter *);
105 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
107 static void ixv_configure_ivars(struct adapter *);
108 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
110 static void ixv_setup_vlan_support(struct adapter *);
111 static void ixv_register_vlan(void *, struct ifnet *, u16);
112 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
114 static void ixv_save_stats(struct adapter *);
115 static void ixv_init_stats(struct adapter *);
116 static void ixv_update_stats(struct adapter *);
117 static void ixv_add_stats_sysctls(struct adapter *);
119 /* The MSI/X Interrupt handlers */
120 static void ixv_msix_que(void *);
121 static void ixv_msix_mbx(void *);
123 /* Deferred interrupt tasklets */
124 static void ixv_handle_que(void *, int);
125 static void ixv_handle_mbx(void *, int);
127 /*********************************************************************
128 * FreeBSD Device Interface Entry Points
129 *********************************************************************/
/* newbus method table wiring probe/attach/detach/shutdown to the
 * static routines above. NOTE(review): extraction dropped the
 * DEVMETHOD_END terminator and closing brace between the entries
 * shown here and the driver_t below. */
131 static device_method_t ixv_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, ixv_probe),
134 DEVMETHOD(device_attach, ixv_attach),
135 DEVMETHOD(device_detach, ixv_detach),
136 DEVMETHOD(device_shutdown, ixv_shutdown),
/* Driver descriptor: name "ixv", method table, and softc size so the
 * bus code allocates a struct adapter per device instance. */
140 static driver_t ixv_driver = {
141 "ixv", ixv_methods, sizeof(struct adapter),
144 devclass_t ixv_devclass;
/* Register the driver on the PCI bus and declare module dependencies. */
145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
146 MODULE_DEPEND(ixv, pci, 1, 1, 1);
147 MODULE_DEPEND(ixv, ether, 1, 1, 1);
148 /* XXX depend on 'ix' ? */
151 ** TUNEABLE PARAMETERS:
155 ** AIM: Adaptive Interrupt Moderation
156 ** which means that the interrupt rate
157 ** is varied over time based on the
158 ** traffic for that interrupt vector
/* Off by default; read per-interrupt in ixv_msix_que. */
160 static int ixv_enable_aim = FALSE;
161 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
163 /* How many packets rxeof tries to clean at a time */
164 static int ixv_rx_process_limit = 256;
165 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
167 /* How many packets txeof tries to clean at a time */
168 static int ixv_tx_process_limit = 256;
169 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
171 /* Flow control setting, default to full */
172 static int ixv_flow_control = ixgbe_fc_full;
173 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
176 * Header split: this causes the hardware to DMA
177 * the header into a separate mbuf from the payload,
178 * it can be a performance win in some workloads, but
179 * in others it actually hurts, it's off by default.
181 static int ixv_header_split = FALSE;
182 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
185 ** Number of TX descriptors per ring,
186 ** setting higher than RX as this seems
187 ** the better performing choice.
/* Both descriptor counts are range/alignment checked in ixv_attach. */
189 static int ixv_txd = DEFAULT_TXD;
190 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
192 /* Number of RX descriptors per ring */
193 static int ixv_rxd = DEFAULT_RXD;
194 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
197 ** Shadow VFTA table, this is needed because
198 ** the real filter table gets cleared during
199 ** a soft reset and we need to repopulate it.
201 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
203 /*********************************************************************
204 * Device identification routine
206 * ixv_probe determines if the driver should be loaded on
207 * adapter based on PCI vendor/device id of the adapter.
209 * return BUS_PROBE_DEFAULT on success, positive on failure
210 *********************************************************************/
/* NOTE(review): extraction dropped lines here (return type line,
 * braces, the non-Intel early return, and the final failure return
 * are not visible). */
213 ixv_probe(device_t dev)
215 ixgbe_vendor_info_t *ent;
217 u16 pci_vendor_id = 0;
218 u16 pci_device_id = 0;
219 u16 pci_subvendor_id = 0;
220 u16 pci_subdevice_id = 0;
221 char adapter_name[256];
/* Read the PCI IDs; only Intel devices proceed past the vendor check. */
224 pci_vendor_id = pci_get_vendor(dev);
225 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
228 pci_device_id = pci_get_device(dev);
229 pci_subvendor_id = pci_get_subvendor(dev);
230 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the vendor table; subvendor/subdevice of 0 in a table entry
 * acts as a wildcard that matches anything. */
232 ent = ixv_vendor_info_array;
233 while (ent->vendor_id != 0) {
234 if ((pci_vendor_id == ent->vendor_id) &&
235 (pci_device_id == ent->device_id) &&
237 ((pci_subvendor_id == ent->subvendor_id) ||
238 (ent->subvendor_id == 0)) &&
240 ((pci_subdevice_id == ent->subdevice_id) ||
241 (ent->subdevice_id == 0))) {
/* Build "branding string, Version - X.Y.Z" as the device description. */
242 sprintf(adapter_name, "%s, Version - %s",
243 ixv_strings[ent->index],
245 device_set_desc_copy(dev, adapter_name);
246 return (BUS_PROBE_DEFAULT);
253 /*********************************************************************
254 * Device initialization routine
256 * The attach entry point is called when the driver is being loaded.
257 * This routine identifies the type of hardware, allocates all resources
258 * and initializes the hardware.
260 * return 0 on success, positive on failure
261 *********************************************************************/
/* NOTE(review): extraction dropped lines throughout this function —
 * local declarations (hw, error), the "goto err_*" error unwinding,
 * the success return, and the err_* labels before the cleanup calls
 * at the bottom are not visible here. */
264 ixv_attach(device_t dev)
266 struct adapter *adapter;
270 INIT_DEBUGOUT("ixv_attach: begin");
272 /* Allocate, clear, and link in our adapter structure */
273 adapter = device_get_softc(dev);
274 adapter->dev = adapter->osdep.dev = dev;
/* Core mutex must exist before the callout below is tied to it. */
278 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl nodes: debug dump hook and AIM toggle. */
281 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
282 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
283 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
284 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
286 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
287 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
288 OID_AUTO, "enable_aim", CTLFLAG_RW,
289 &ixv_enable_aim, 1, "Interrupt Moderation");
291 /* Set up the timer callout */
292 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
294 /* Determine hardware revision */
295 ixv_identify_hardware(adapter);
297 /* Do base PCI setup - map BAR0 */
298 if (ixv_allocate_pci_resources(adapter)) {
299 device_printf(dev, "Allocation of PCI resources failed\n");
304 /* Do descriptor calc and sanity checks */
/* Ring sizes must be DBA_ALIGN-aligned in bytes and within
 * [MIN,MAX]; fall back to defaults on a bad tunable. */
305 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
306 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
307 device_printf(dev, "TXD config issue, using default!\n");
308 adapter->num_tx_desc = DEFAULT_TXD;
310 adapter->num_tx_desc = ixv_txd;
312 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
313 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
314 device_printf(dev, "RXD config issue, using default!\n");
315 adapter->num_rx_desc = DEFAULT_RXD;
317 adapter->num_rx_desc = ixv_rxd;
319 /* Allocate our TX/RX Queues */
320 if (ixgbe_allocate_queues(adapter)) {
326 ** Initialize the shared code: its
327 ** at this point the mac type is set.
329 error = ixgbe_init_shared_code(hw);
331 device_printf(dev,"Shared Code Initialization Failure\n");
336 /* Setup the mailbox */
337 ixgbe_init_mbx_params_vf(hw);
341 error = ixgbe_init_hw(hw);
343 device_printf(dev,"Hardware Initialization Failure\n");
348 error = ixv_allocate_msix(adapter);
352 /* If no mac address was assigned, make a random one */
353 if (!ixv_check_ether_addr(hw->mac.addr)) {
354 u8 addr[ETHER_ADDR_LEN];
355 arc4rand(&addr, sizeof(addr), 0);
358 bcopy(addr, hw->mac.addr, sizeof(addr));
361 /* Setup OS specific network interface */
362 ixv_setup_interface(dev, adapter);
364 /* Do the stats setup */
365 ixv_save_stats(adapter);
366 ixv_init_stats(adapter);
367 ixv_add_stats_sysctls(adapter);
369 /* Register for VLAN events */
370 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
371 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
372 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
373 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
375 INIT_DEBUGOUT("ixv_attach: end");
/* Error-path cleanup (reached via labels dropped by the extraction):
 * release queues and PCI resources in reverse order of acquisition. */
379 ixgbe_free_transmit_structures(adapter);
380 ixgbe_free_receive_structures(adapter);
382 ixv_free_pci_resources(adapter);
387 /*********************************************************************
388 * Device removal routine
390 * The detach entry point is called when the driver is being removed.
391 * This routine stops the adapter and deallocates all the resources
392 * that were allocated for driver operation.
394 * return 0 on success, positive on failure
395 *********************************************************************/
/* NOTE(review): extraction dropped lines here (return type, braces,
 * the EBUSY return on the VLAN check, the ixv_stop call between the
 * lock/unlock pair, and the final return 0 are not visible). */
398 ixv_detach(device_t dev)
400 struct adapter *adapter = device_get_softc(dev);
401 struct ix_queue *que = adapter->queues;
403 INIT_DEBUGOUT("ixv_detach: begin");
405 /* Make sure VLANS are not using driver */
406 if (adapter->ifp->if_vlantrunk != NULL) {
407 device_printf(dev,"Vlan in use, detach first\n");
/* Stop the adapter under the core lock before tearing anything down. */
411 IXGBE_CORE_LOCK(adapter);
413 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues so no deferred work can
 * run against freed state. */
415 for (int i = 0; i < adapter->num_queues; i++, que++) {
417 struct tx_ring *txr = que->txr;
418 taskqueue_drain(que->tq, &txr->txq_task);
419 taskqueue_drain(que->tq, &que->que_task);
420 taskqueue_free(que->tq);
424 /* Drain the Mailbox(link) queue */
426 taskqueue_drain(adapter->tq, &adapter->link_task);
427 taskqueue_free(adapter->tq);
430 /* Unregister VLAN events */
431 if (adapter->vlan_attach != NULL)
432 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
433 if (adapter->vlan_detach != NULL)
434 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack, stop the timer, then release
 * bus resources and the ifnet itself. */
436 ether_ifdetach(adapter->ifp);
437 callout_drain(&adapter->timer);
438 ixv_free_pci_resources(adapter);
439 bus_generic_detach(dev);
440 if_free(adapter->ifp);
442 ixgbe_free_transmit_structures(adapter);
443 ixgbe_free_receive_structures(adapter);
445 IXGBE_CORE_LOCK_DESTROY(adapter);
449 /*********************************************************************
451 * Shutdown entry point
453 **********************************************************************/
/* Quiesce the adapter at system shutdown. NOTE(review): extraction
 * dropped the return type, braces, the ixv_stop call between the
 * lock/unlock pair, and the return statement. */
455 ixv_shutdown(device_t dev)
457 struct adapter *adapter = device_get_softc(dev);
458 IXGBE_CORE_LOCK(adapter);
460 IXGBE_CORE_UNLOCK(adapter);
465 /*********************************************************************
468 * ixv_ioctl is called when the user wants to configure the
471 * return 0 on success, positive on failure
472 **********************************************************************/
/* ifnet ioctl handler. NOTE(review): extraction dropped the switch
 * statement skeleton (case labels, breaks, error assignments) — only
 * the per-case bodies remain visible below. */
475 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
477 struct adapter *adapter = ifp->if_softc;
478 struct ifreq *ifr = (struct ifreq *) data;
479 #if defined(INET) || defined(INET6)
480 struct ifaddr *ifa = (struct ifaddr *) data;
481 bool avoid_reset = FALSE;
/* SIOCSIFADDR: for INET/INET6 addresses we can skip the full reset. */
489 if (ifa->ifa_addr->sa_family == AF_INET)
493 if (ifa->ifa_addr->sa_family == AF_INET6)
496 #if defined(INET) || defined(INET6)
498 ** Calling init results in link renegotiation,
499 ** so we avoid doing it when possible.
502 ifp->if_flags |= IFF_UP;
503 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
505 if (!(ifp->if_flags & IFF_NOARP))
506 arp_ifinit(ifp, ifa);
508 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate against the hardware max frame size, then
 * re-init under the core lock to apply the new MTU. */
512 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
513 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
516 IXGBE_CORE_LOCK(adapter);
517 ifp->if_mtu = ifr->ifr_mtu;
518 adapter->max_frame_size =
519 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
520 ixv_init_locked(adapter);
521 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: bring the interface up or down to match IFF_UP. */
525 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
526 IXGBE_CORE_LOCK(adapter);
527 if (ifp->if_flags & IFF_UP) {
528 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
529 ixv_init_locked(adapter);
531 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
533 adapter->if_flags = ifp->if_flags;
534 IXGBE_CORE_UNLOCK(adapter);
/* SIOCADDMULTI / SIOCDELMULTI: reload the multicast filter with
 * interrupts masked so the update is atomic w.r.t. the hardware. */
538 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
539 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
540 IXGBE_CORE_LOCK(adapter);
541 ixv_disable_intr(adapter);
542 ixv_set_multi(adapter);
543 ixv_enable_intr(adapter);
544 IXGBE_CORE_UNLOCK(adapter);
/* SIOCGIFMEDIA / SIOCSIFMEDIA: delegate to the ifmedia layer. */
549 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
550 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle only the capability bits that changed, then
 * re-init if running so offloads take effect. */
554 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
555 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
556 if (mask & IFCAP_HWCSUM)
557 ifp->if_capenable ^= IFCAP_HWCSUM;
558 if (mask & IFCAP_TSO4)
559 ifp->if_capenable ^= IFCAP_TSO4;
560 if (mask & IFCAP_LRO)
561 ifp->if_capenable ^= IFCAP_LRO;
562 if (mask & IFCAP_VLAN_HWTAGGING)
563 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
564 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
565 IXGBE_CORE_LOCK(adapter);
566 ixv_init_locked(adapter);
567 IXGBE_CORE_UNLOCK(adapter);
569 VLAN_CAPABILITIES(ifp);
/* default: everything else goes to the generic Ethernet handler. */
574 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
575 error = ether_ioctl(ifp, command, data);
582 /*********************************************************************
585 * This routine is used in two ways. It is used by the stack as
586 * init entry point in network interface structure. It is also used
587 * by the driver as a hw/sw initialization routine to get to a
590 * return 0 on success, positive on failure
591 **********************************************************************/
592 #define IXGBE_MHADD_MFS_SHIFT 16
/* Full (re)initialization; caller MUST hold the core mutex (asserted
 * below). NOTE(review): extraction dropped lines here — local
 * declarations (gpie, mhadd), braces, else branches, and early
 * returns on setup failures are not visible. */
595 ixv_init_locked(struct adapter *adapter)
597 struct ifnet *ifp = adapter->ifp;
598 device_t dev = adapter->dev;
599 struct ixgbe_hw *hw = &adapter->hw;
602 INIT_DEBUGOUT("ixv_init: begin");
603 mtx_assert(&adapter->core_mtx, MA_OWNED);
/* Quiesce the hardware and the watchdog before reprogramming. */
604 hw->adapter_stopped = FALSE;
605 ixgbe_stop_adapter(hw);
606 callout_stop(&adapter->timer);
608 /* reprogram the RAR[0] in case user changed it. */
609 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
611 /* Get the latest mac address, User can use a LAA */
612 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
613 IXGBE_ETH_LENGTH_OF_ADDRESS);
614 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
615 hw->addr_ctrl.rar_used_count = 1;
617 /* Prepare transmit descriptors and buffers */
618 if (ixgbe_setup_transmit_structures(adapter)) {
619 device_printf(dev,"Could not setup transmit structures\n");
625 ixv_initialize_transmit_units(adapter);
627 /* Setup Multicast table */
628 ixv_set_multi(adapter);
631 ** Determine the correct mbuf pool
632 ** for doing jumbo/headersplit
634 if (ifp->if_mtu > ETHERMTU)
635 adapter->rx_mbuf_sz = MJUMPAGESIZE;
637 adapter->rx_mbuf_sz = MCLBYTES;
639 /* Prepare receive descriptors and buffers */
640 if (ixgbe_setup_receive_structures(adapter)) {
641 device_printf(dev,"Could not setup receive structures\n");
646 /* Configure RX settings */
647 ixv_initialize_receive_units(adapter);
649 /* Enable Enhanced MSIX mode */
650 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
651 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
652 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
653 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
655 /* Set the various hardware offload abilities */
656 ifp->if_hwassist = 0;
657 if (ifp->if_capenable & IFCAP_TSO4)
658 ifp->if_hwassist |= CSUM_TSO;
659 if (ifp->if_capenable & IFCAP_TXCSUM) {
660 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
661 #if __FreeBSD_version >= 800000
662 ifp->if_hwassist |= CSUM_SCTP;
/* Program the max frame size register for jumbo frames. */
667 if (ifp->if_mtu > ETHERMTU) {
668 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
669 mhadd &= ~IXGBE_MHADD_MFS_MASK;
670 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
671 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
674 /* Set up VLAN offload and filter */
675 ixv_setup_vlan_support(adapter);
/* Restart the 1 Hz watchdog/stats timer. */
677 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
679 /* Set up MSI/X routing */
680 ixv_configure_ivars(adapter);
682 /* Set up auto-mask */
683 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
685 /* Set moderation on the Link interrupt */
686 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
689 ixv_init_stats(adapter);
691 /* Config/Enable Link */
692 ixv_config_link(adapter);
694 /* And now turn on interrupts */
695 ixv_enable_intr(adapter);
697 /* Now inform the stack we're ready */
698 ifp->if_drv_flags |= IFF_DRV_RUNNING;
699 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* ixv_init(): stack-facing wrapper — takes the core lock around
 * ixv_init_locked(). NOTE(review): its "static void ixv_init(void
 * *arg)" signature line was dropped by the extraction. */
707 struct adapter *adapter = arg;
709 IXGBE_CORE_LOCK(adapter);
710 ixv_init_locked(adapter);
711 IXGBE_CORE_UNLOCK(adapter);
718 ** MSIX Interrupt Handlers and Tasklets
/* Unmask the MSI-X interrupt for one queue vector (VTEIMS set). */
723 ixv_enable_queue(struct adapter *adapter, u32 vector)
725 struct ixgbe_hw *hw = &adapter->hw;
726 u32 queue = 1 << vector;
729 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
730 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Mask the MSI-X interrupt for one queue vector (VTEIMC set).
 * NOTE(review): "(u64)(1 << vector)" shifts in int before widening —
 * UB for vector >= 31; presumably VF queue counts keep vector small,
 * but "((u64)1 << vector)" would be the safe spelling. */
734 ixv_disable_queue(struct adapter *adapter, u32 vector)
736 struct ixgbe_hw *hw = &adapter->hw;
737 u64 queue = (u64)(1 << vector);
740 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
741 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
/* Trigger a software interrupt on every queue in the bitmask so the
 * handlers run again (used by the watchdog for queues with work). */
745 ixv_rearm_queues(struct adapter *adapter, u64 queues)
747 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
748 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
/* Deferred (taskqueue) per-queue service routine: cleans RX, kicks
 * TX, and requeues itself while work remains; re-enables the queue
 * interrupt when done. NOTE(review): extraction dropped the return
 * type line, braces, and the txeof call under IXGBE_TX_LOCK. */
753 ixv_handle_que(void *context, int pending)
755 struct ix_queue *que = context;
756 struct adapter *adapter = que->adapter;
757 struct tx_ring *txr = que->txr;
758 struct ifnet *ifp = adapter->ifp;
761 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
762 more = ixgbe_rxeof(que);
/* Restart transmission: multiqueue (drbr) path on >= 8.0, legacy
 * if_snd path otherwise. */
765 #if __FreeBSD_version >= 800000
766 if (!drbr_empty(ifp, txr->br))
767 ixgbe_mq_start_locked(ifp, txr);
769 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
770 ixgbe_start_locked(txr, ifp);
772 IXGBE_TX_UNLOCK(txr);
/* More RX work pending: reschedule ourselves instead of unmasking. */
774 taskqueue_enqueue(que->tq, &que->que_task);
779 /* Reenable this interrupt */
780 ixv_enable_queue(adapter, que->msix);
784 /*********************************************************************
786 * MSI Queue Interrupt Service routine
788 **********************************************************************/
/* MSI-X interrupt handler for one RX/TX queue pair. Masks its own
 * vector, cleans both rings, optionally recomputes the interrupt
 * moderation (AIM), then either defers to the tasklet or unmasks.
 * NOTE(review): extraction dropped locals (more, newitr), braces,
 * txeof under the TX lock, and the AIM goto/label skeleton. */
790 ixv_msix_que(void *arg)
792 struct ix_queue *que = arg;
793 struct adapter *adapter = que->adapter;
794 struct ifnet *ifp = adapter->ifp;
795 struct tx_ring *txr = que->txr;
796 struct rx_ring *rxr = que->rxr;
800 ixv_disable_queue(adapter, que->msix);
803 more = ixgbe_rxeof(que);
808 ** Make certain that if the stack
809 ** has anything queued the task gets
810 ** scheduled to handle it.
812 #ifdef IXGBE_LEGACY_TX
813 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
814 ixgbe_start_locked(txr, ifp);
816 if (!drbr_empty(adapter->ifp, txr->br))
817 ixgbe_mq_start_locked(ifp, txr);
819 IXGBE_TX_UNLOCK(txr);
/* AIM disabled: skip straight to the re-enable/reschedule logic. */
823 if (ixv_enable_aim == FALSE)
826 ** Do Adaptive Interrupt Moderation:
827 ** - Write out last calculated setting
828 ** - Calculate based on average size over
829 ** the last interval.
830 /* Apply the value computed on the previous interrupt first. */
831 if (que->eitr_setting)
832 IXGBE_WRITE_REG(&adapter->hw,
833 IXGBE_VTEITR(que->msix),
836 que->eitr_setting = 0;
838 /* Idle, do nothing */
839 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* New ITR is derived from the average packet size this interval. */
842 if ((txr->bytes) && (txr->packets))
843 newitr = txr->bytes/txr->packets;
844 if ((rxr->bytes) && (rxr->packets))
846 (rxr->bytes / rxr->packets));
847 newitr += 24; /* account for hardware frame, crc */
849 /* set an upper boundary */
850 newitr = min(newitr, 3000);
852 /* Be nice to the mid range */
853 if ((newitr > 300) && (newitr < 1200))
854 newitr = (newitr / 3);
856 newitr = (newitr / 2);
/* VTEITR wants the interval replicated in the upper 16 bits. */
858 newitr |= newitr << 16;
860 /* save for next interrupt */
861 que->eitr_setting = newitr;
/* More work outstanding: hand off to the tasklet; otherwise unmask. */
871 taskqueue_enqueue(que->tq, &que->que_task);
872 else /* Reenable this interrupt */
873 ixv_enable_queue(adapter, que->msix);
/* MSI-X handler for the mailbox/link vector: read and clear the
 * cause, defer link-state work to the mbx tasklet, then unmask the
 * "other" interrupt. NOTE(review): extraction dropped the return
 * type line, the u32 reg declaration, braces, and the statistics
 * increment visible in gaps. */
878 ixv_msix_mbx(void *arg)
880 struct adapter *adapter = arg;
881 struct ixgbe_hw *hw = &adapter->hw;
886 /* First get the cause */
887 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
888 /* Clear interrupt with write */
889 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
891 /* Link status change */
892 if (reg & IXGBE_EICR_LSC)
893 taskqueue_enqueue(adapter->tq, &adapter->link_task);
895 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
899 /*********************************************************************
901 * Media Ioctl callback
903 * This routine is called whenever the user queries the status of
904 * the interface using ifconfig.
906 **********************************************************************/
/* NOTE(review): extraction dropped the return type line, braces,
 * the early return in the !link_active branch, and the case breaks. */
908 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
910 struct adapter *adapter = ifp->if_softc;
912 INIT_DEBUGOUT("ixv_media_status: begin");
/* Refresh cached link state under the core lock before reporting. */
913 IXGBE_CORE_LOCK(adapter);
914 ixv_update_link_status(adapter);
916 ifmr->ifm_status = IFM_AVALID;
917 ifmr->ifm_active = IFM_ETHER;
919 if (!adapter->link_active) {
920 IXGBE_CORE_UNLOCK(adapter);
924 ifmr->ifm_status |= IFM_ACTIVE;
/* Map the hardware-reported speed onto ifmedia flags. */
926 switch (adapter->link_speed) {
927 case IXGBE_LINK_SPEED_1GB_FULL:
928 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
930 case IXGBE_LINK_SPEED_10GB_FULL:
931 ifmr->ifm_active |= IFM_FDX;
935 IXGBE_CORE_UNLOCK(adapter);
940 /*********************************************************************
942 * Media Ioctl callback
944 * This routine is called when the user changes speed/duplex using
945 * media/mediaopt option with ifconfig.
947 **********************************************************************/
/* Only IFM_AUTO is accepted — the VF has no control over physical
 * media. NOTE(review): extraction dropped the return type line,
 * braces, the EINVAL returns, and the IFM_AUTO case label. */
949 ixv_media_change(struct ifnet * ifp)
951 struct adapter *adapter = ifp->if_softc;
952 struct ifmedia *ifm = &adapter->media;
954 INIT_DEBUGOUT("ixv_media_change: begin");
956 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
959 switch (IFM_SUBTYPE(ifm->ifm_media)) {
963 device_printf(adapter->dev, "Only auto media type\n");
971 /*********************************************************************
974 * This routine is called whenever multicast address list is updated.
976 **********************************************************************/
977 #define IXGBE_RAR_ENTRIES 16
/* Rebuild the hardware multicast filter from the ifnet's multicast
 * list. NOTE(review): extraction dropped locals (update_ptr, mcnt),
 * braces, the continue in the AF_LINK check, the mcnt++ and the
 * address-list lock acquisition that pairs with the unlocks below. */
980 ixv_set_multi(struct adapter *adapter)
982 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
984 struct ifmultiaddr *ifma;
986 struct ifnet *ifp = adapter->ifp;
988 IOCTL_DEBUGOUT("ixv_set_multi: begin");
/* Collect link-layer multicast addresses into the flat mta[] array. */
990 #if __FreeBSD_version < 800000
995 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
996 if (ifma->ifma_addr->sa_family != AF_LINK)
998 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
999 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1000 IXGBE_ETH_LENGTH_OF_ADDRESS);
1003 #if __FreeBSD_version < 800000
1004 IF_ADDR_UNLOCK(ifp);
1006 if_maddr_runlock(ifp);
/* Hand the array to shared code via the ixv_mc_array_itr iterator. */
1011 ixgbe_update_mc_addr_list(&adapter->hw,
1012 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1018 * This is an iterator function now needed by the multicast
1019 * shared code. It simply feeds the shared code routine the
1020 * addresses in the array of ixv_set_multi() one by one.
/* Advances *update_ptr by one MAC address per call and returns the
 * current address. NOTE(review): extraction dropped the return type,
 * braces, the newptr declaration, the *vmdq assignment, and the
 * return statement. */
1023 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1025 u8 *addr = *update_ptr;
1029 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1030 *update_ptr = newptr;
1034 /*********************************************************************
1037 * This routine checks for link status,updates statistics,
1038 * and runs the watchdog check.
1040 **********************************************************************/
/* 1 Hz callout armed by ixv_init_locked; runs with the core mutex
 * held (asserted below). NOTE(review): extraction dropped locals
 * (hung, queues), braces, the "goto watchdog" and the watchdog:
 * label between the two fragments in this region. */
1043 ixv_local_timer(void *arg)
1045 struct adapter *adapter = arg;
1046 device_t dev = adapter->dev;
1047 struct ix_queue *que = adapter->queues;
1051 mtx_assert(&adapter->core_mtx, MA_OWNED);
1053 ixv_update_link_status(adapter);
1056 ixv_update_stats(adapter);
1059 ** Check the TX queues status
1060 ** - mark hung queues so we don't schedule on them
1061 ** - watchdog only if all queues show hung
1063 for (int i = 0; i < adapter->num_queues; i++, que++) {
1064 /* Keep track of queues with work for soft irq */
1066 queues |= ((u64)1 << que->me);
1068 ** Each time txeof runs without cleaning, but there
1069 ** are uncleaned descriptors it increments busy. If
1070 ** we get to the MAX we declare it hung.
1072 if (que->busy == IXGBE_QUEUE_HUNG) {
1074 /* Mark the queue as inactive */
1075 adapter->active_queues &= ~((u64)1 << que->me);
1078 /* Check if we've come back from hung */
1079 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1080 adapter->active_queues |= ((u64)1 << que->me);
1082 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1083 device_printf(dev,"Warning queue %d "
1084 "appears to be hung!\n", i);
1085 que->txr->busy = IXGBE_QUEUE_HUNG;
1091 /* Only truly watchdog if all queues show hung */
1092 if (hung == adapter->num_queues)
1094 else if (queues != 0) { /* Force an IRQ on queues with work */
1095 ixv_rearm_queues(adapter, queues);
1098 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog path (label dropped by extraction): mark the interface
 * down, count the event, and reset the adapter. */
1102 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1103 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1104 adapter->watchdog_events++;
1105 ixv_init_locked(adapter);
1109 ** Note: this routine updates the OS on the link state
1110 ** the real check of the hardware only happens with
1111 ** a link interrupt.
/* Pushes cached adapter->link_up/link_speed to the ifnet layer, but
 * only on an actual transition (link_active tracks the last state
 * reported). NOTE(review): extraction dropped the return type line,
 * braces, the bootverbose guards, and the duplex string argument of
 * the "Link is up" printf. */
1114 ixv_update_link_status(struct adapter *adapter)
1116 struct ifnet *ifp = adapter->ifp;
1117 device_t dev = adapter->dev;
1119 if (adapter->link_up){
1120 if (adapter->link_active == FALSE) {
/* link_speed == 128 corresponds to the 10 Gbps code here. */
1122 device_printf(dev,"Link is up %d Gbps %s \n",
1123 ((adapter->link_speed == 128)? 10:1),
1125 adapter->link_active = TRUE;
1126 if_link_state_change(ifp, LINK_STATE_UP);
1128 } else { /* Link down */
1129 if (adapter->link_active == TRUE) {
1131 device_printf(dev,"Link is Down\n");
1132 if_link_state_change(ifp, LINK_STATE_DOWN);
1133 adapter->link_active = FALSE;
1141 /*********************************************************************
1143 * This routine disables all traffic on the adapter by issuing a
1144 * global reset on the MAC and deallocates TX/RX buffers.
1146 **********************************************************************/
/* ixv_stop(): quiesce the adapter; caller must hold the core mutex
 * (asserted below). NOTE(review): extraction dropped the "static
 * void ixv_stop(void *arg)" signature line, the ifp local, braces,
 * and the trailing VTEIMC interrupt-disable write. */
1152 struct adapter *adapter = arg;
1153 struct ixgbe_hw *hw = &adapter->hw;
1156 mtx_assert(&adapter->core_mtx, MA_OWNED);
1158 INIT_DEBUGOUT("ixv_stop: begin\n");
1159 ixv_disable_intr(adapter);
1161 /* Tell the stack that the interface is no longer active */
1162 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* adapter_stopped is cleared first so ixgbe_stop_adapter actually
 * performs the stop rather than short-circuiting. */
1165 adapter->hw.adapter_stopped = FALSE;
1166 ixgbe_stop_adapter(hw);
1167 callout_stop(&adapter->timer);
1169 /* reprogram the RAR[0] in case user changed it. */
1170 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1176 /*********************************************************************
1178 * Determine hardware revision.
1180 **********************************************************************/
/* Copies PCI config-space identity into the shared-code hw struct
 * and derives the MAC type from it. NOTE(review): extraction dropped
 * the return type line and braces. */
1182 ixv_identify_hardware(struct adapter *adapter)
1184 device_t dev = adapter->dev;
1185 struct ixgbe_hw *hw = &adapter->hw;
1188 ** Make sure BUSMASTER is set, on a VM under
1189 ** KVM it may not be and will break things.
1191 pci_enable_busmaster(dev);
1193 /* Save off the information about this board */
1194 hw->vendor_id = pci_get_vendor(dev);
1195 hw->device_id = pci_get_device(dev);
1196 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1197 hw->subsystem_vendor_id =
1198 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1199 hw->subsystem_device_id =
1200 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1202 /* We need this to determine device-specific things */
1203 ixgbe_set_mac_type(hw);
1205 /* Set the right number of segments */
1206 adapter->num_segs = IXGBE_82599_SCATTER;
1211 /*********************************************************************
1213 * Setup MSIX Interrupt resources and handlers
1215 **********************************************************************/
/* Allocates one MSI-X vector + taskqueue per RX/TX queue, then one
 * final vector for the mailbox/link interrupt. NOTE(review):
 * extraction dropped lines throughout — rid initialization, error
 * returns, closing braces, #endif lines, que->msix assignment, and
 * the msix_ctrl declaration in the QEMU workaround. */
1217 ixv_allocate_msix(struct adapter *adapter)
1219 device_t dev = adapter->dev;
1220 struct ix_queue *que = adapter->queues;
1221 struct tx_ring *txr = adapter->tx_rings;
1222 int error, rid, vector = 0;
1224 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1226 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1227 RF_SHAREABLE | RF_ACTIVE);
1228 if (que->res == NULL) {
1229 device_printf(dev,"Unable to allocate"
1230 " bus resource: que interrupt [%d]\n", vector);
1233 /* Set the handler function */
1234 error = bus_setup_intr(dev, que->res,
1235 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1236 ixv_msix_que, que, &que->tag);
1239 device_printf(dev, "Failed to register QUE handler");
1242 #if __FreeBSD_version >= 800504
1243 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1246 adapter->active_queues |= (u64)(1 << que->msix);
1248 ** Bind the msix vector, and thus the
1249 ** ring to the corresponding cpu.
1251 if (adapter->num_queues > 1)
1252 bus_bind_intr(dev, que->res, i);
/* Deferred-work plumbing: TX start task, queue service task, and a
 * dedicated fast taskqueue thread per queue. */
1253 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1254 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1255 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1256 taskqueue_thread_enqueue, &que->tq);
1257 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1258 device_get_nameunit(adapter->dev));
/* Now the mailbox/link vector (the one after the queue vectors). */
1263 adapter->res = bus_alloc_resource_any(dev,
1264 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1265 if (!adapter->res) {
1266 device_printf(dev,"Unable to allocate"
1267 " bus resource: MBX interrupt [%d]\n", rid);
1270 /* Set the mbx handler function */
1271 error = bus_setup_intr(dev, adapter->res,
1272 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1273 ixv_msix_mbx, adapter, &adapter->tag);
1275 adapter->res = NULL;
1276 device_printf(dev, "Failed to register LINK handler");
1279 #if __FreeBSD_version >= 800504
1280 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1282 adapter->vector = vector;
1283 /* Tasklets for Mailbox */
1284 TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1285 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1286 taskqueue_thread_enqueue, &adapter->tq);
1287 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1288 device_get_nameunit(adapter->dev));
1290 ** Due to a broken design QEMU will fail to properly
1291 ** enable the guest for MSIX unless the vectors in
1292 ** the table are all set up, so we must rewrite the
1293 ** ENABLE in the MSIX control register again at this
1294 ** point to cause it to successfully initialize us.
1296 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1298 pci_find_cap(dev, PCIY_MSIX, &rid);
1299 rid += PCIR_MSIX_CTRL;
1300 msix_ctrl = pci_read_config(dev, rid, 2);
1301 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1302 pci_write_config(dev, rid, msix_ctrl, 2);
/*
 * ixv_setup_msix - map the MSIX table BAR and allocate vectors.
 *
 * Wants exactly two vectors (one queue + one mailbox).  On any failure
 * the partially-acquired resources are released before the error path.
 */
1309 * Setup MSIX resources, note that the VF
1310 * device MUST use MSIX, there is no fallback.
1313 ixv_setup_msix(struct adapter *adapter)
1315 device_t dev = adapter->dev;
1319 /* First try MSI/X */
1321 adapter->msix_mem = bus_alloc_resource_any(dev,
1322 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1323 if (adapter->msix_mem == NULL) {
1324 device_printf(adapter->dev,
1325 "Unable to map MSIX table \n");
1330 ** Want two vectors: one for a queue,
1331 ** plus an additional for mailbox.
/* pci_alloc_msix() may grant fewer than requested; both must succeed. */
1334 if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
1335 device_printf(adapter->dev,
1336 "Using MSIX interrupts with %d vectors\n", want);
1339 /* Release in case alloc was insufficient */
1340 pci_release_msi(dev);
1342 if (adapter->msix_mem != NULL) {
1343 bus_release_resource(dev, SYS_RES_MEMORY,
1344 rid, adapter->msix_mem);
1345 adapter->msix_mem = NULL;
1347 device_printf(adapter->dev,"MSIX config error\n");
/*
 * ixv_allocate_pci_resources - map BAR(0) registers and set up MSIX.
 *
 * Saves the bus-space tag/handle for the register window and records
 * the number of MSIX vectors configured by ixv_setup_msix().
 */
1353 ixv_allocate_pci_resources(struct adapter *adapter)
1356 device_t dev = adapter->dev;
1359 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1362 if (!(adapter->pci_mem)) {
1363 device_printf(dev,"Unable to allocate bus resource: memory\n");
1367 adapter->osdep.mem_bus_space_tag =
1368 rman_get_bustag(adapter->pci_mem);
1369 adapter->osdep.mem_bus_space_handle =
1370 rman_get_bushandle(adapter->pci_mem);
/* Shared code does register access through hw_addr via the osdep handle. */
1371 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
/* This VF driver runs a single queue pair. */
1373 adapter->num_queues = 1;
1374 adapter->hw.back = &adapter->osdep;
1377 ** Now setup MSI/X, should
1378 ** return us the number of
1379 ** configured vectors.
1381 adapter->msix = ixv_setup_msix(adapter);
1382 if (adapter->msix == ENXIO)
/*
 * ixv_free_pci_resources - tear down everything the two allocate
 * routines above acquired: per-queue IRQs, the mailbox IRQ, MSIX
 * vectors, the MSIX table BAR, and the register BAR.  Safe to call
 * from a failed attach (guards on adapter->res below).
 */
1389 ixv_free_pci_resources(struct adapter * adapter)
1391 struct ix_queue *que = adapter->queues;
1392 device_t dev = adapter->dev;
1395 memrid = PCIR_BAR(MSIX_82598_BAR);
1398 ** There is a slight possibility of a failure mode
1399 ** in attach that will result in entering this function
1400 ** before interrupt resources have been initialized, and
1401 ** in that case we do not want to execute the loops below
1402 ** We can detect this reliably by the state of the adapter
1405 if (adapter->res == NULL)
1409 ** Release all msix queue resources:
1411 for (int i = 0; i < adapter->num_queues; i++, que++) {
/* Resource IDs are 1-based relative to the msix vector. */
1412 rid = que->msix + 1;
1413 if (que->tag != NULL) {
1414 bus_teardown_intr(dev, que->res, que->tag);
1417 if (que->res != NULL)
1418 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1422 /* Clean the Legacy or Link interrupt last */
1423 if (adapter->vector) /* we are doing MSIX */
1424 rid = adapter->vector + 1;
1426 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1428 if (adapter->tag != NULL) {
1429 bus_teardown_intr(dev, adapter->res, adapter->tag);
1430 adapter->tag = NULL;
1432 if (adapter->res != NULL)
1433 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1437 pci_release_msi(dev);
1439 if (adapter->msix_mem != NULL)
1440 bus_release_resource(dev, SYS_RES_MEMORY,
1441 memrid, adapter->msix_mem)
1443 if (adapter->pci_mem != NULL)
1444 bus_release_resource(dev, SYS_RES_MEMORY,
1445 PCIR_BAR(0), adapter->pci_mem);
1450 /*********************************************************************
1452 * Setup networking device structure and register an interface.
1454 **********************************************************************/
/*
 * Allocates the ifnet, wires up init/ioctl/transmit entry points,
 * advertises checksum/TSO/VLAN/LRO capabilities, attaches the ether
 * layer, and registers the (fixed, VF-side) media types.
 */
1456 ixv_setup_interface(device_t dev, struct adapter *adapter)
1460 INIT_DEBUGOUT("ixv_setup_interface: begin");
1462 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1464 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1465 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1466 ifp->if_baudrate = 1000000000;
1467 ifp->if_init = ixv_init;
1468 ifp->if_softc = adapter;
1469 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1470 ifp->if_ioctl = ixv_ioctl;
/* Multiqueue transmit on 8.0+, legacy if_start otherwise. */
1471 #if __FreeBSD_version >= 800000
1472 ifp->if_transmit = ixgbe_mq_start;
1473 ifp->if_qflush = ixgbe_qflush;
1475 ifp->if_start = ixgbe_start;
1477 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1479 ether_ifattach(ifp, adapter->hw.mac.addr);
1481 adapter->max_frame_size =
1482 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1485 * Tell the upper layer(s) we support long frames.
1487 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1489 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1490 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1491 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1494 ifp->if_capabilities |= IFCAP_LRO;
1495 ifp->if_capenable = ifp->if_capabilities;
1498 * Specify the media types supported by this adapter and register
1499 * callbacks to update media and link information
1501 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1503 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1504 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1505 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
 * ixv_config_link - query link state via the shared-code ops and
 * (re)run link setup with the discovered autoneg value.  Both ops
 * are optional function pointers, hence the NULL guards.
 */
1511 ixv_config_link(struct adapter *adapter)
1513 struct ixgbe_hw *hw = &adapter->hw;
1514 u32 autoneg, err = 0;
1516 if (hw->mac.ops.check_link)
1517 err = hw->mac.ops.check_link(hw, &autoneg,
1518 &adapter->link_up, FALSE);
/* NOTE(review): err from check_link is overwritten here without being examined. */
1522 if (hw->mac.ops.setup_link)
1523 err = hw->mac.ops.setup_link(hw,
1524 autoneg, adapter->link_up);
1530 /*********************************************************************
1532 * Enable transmit unit.
1534 **********************************************************************/
/*
 * Programs each TX ring's base/length/head/tail registers, disables
 * write-relaxed-ordering, and enables the ring.
 */
1536 ixv_initialize_transmit_units(struct adapter *adapter)
1538 struct tx_ring *txr = adapter->tx_rings;
1539 struct ixgbe_hw *hw = &adapter->hw;
1542 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1543 u64 tdba = txr->txdma.dma_paddr;
1546 /* Set WTHRESH to 8, burst writeback */
1547 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
/* WTHRESH lives at bits 16-22 of TXDCTL. */
1548 txdctl |= (8 << 16);
1549 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1551 /* Set the HW Tx Head and Tail indices */
1552 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1553 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1555 /* Set Tx Tail register */
1556 txr->tail = IXGBE_VFTDT(i);
1558 /* Set the processing limit */
1559 txr->process_limit = ixv_tx_process_limit;
/* Base address is split across two 32-bit registers. */
1561 /* Set Ring parameters */
1562 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1563 (tdba & 0x00000000ffffffffULL));
1564 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1565 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1566 adapter->num_tx_desc *
1567 sizeof(struct ixgbe_legacy_tx_desc));
/* Turn off descriptor write relaxed ordering for this ring. */
1568 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1569 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1570 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
1573 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1574 txdctl |= IXGBE_TXDCTL_ENABLE;
1575 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1582 /*********************************************************************
1584 * Setup receive registers and features.
1586 **********************************************************************/
1587 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/*
 * Programs broadcast/jumbo policy, per-ring base/length/SRRCTL/
 * head/tail registers, enables each ring (polling for the enable
 * bit to latch), then configures RX checksum offload.
 */
1590 ixv_initialize_receive_units(struct adapter *adapter)
1592 struct rx_ring *rxr = adapter->rx_rings;
1593 struct ixgbe_hw *hw = &adapter->hw;
1594 struct ifnet *ifp = adapter->ifp;
1595 u32 bufsz, fctrl, rxcsum, hlreg;
1598 /* Enable broadcasts */
1599 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1600 fctrl |= IXGBE_FCTRL_BAM;
1601 fctrl |= IXGBE_FCTRL_DPF;
1602 fctrl |= IXGBE_FCTRL_PMCF;
1603 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
/* Pick 4K buffers for jumbo MTU, 2K otherwise. */
1605 /* Set for Jumbo Frames? */
1606 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1607 if (ifp->if_mtu > ETHERMTU) {
1608 hlreg |= IXGBE_HLREG0_JUMBOEN;
1609 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1611 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
1612 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1614 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
1616 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1617 u64 rdba = rxr->rxdma.dma_paddr;
1620 /* Setup the Base and Length of the Rx Descriptor Ring */
1621 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1622 (rdba & 0x00000000ffffffffULL));
1623 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
1625 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1626 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1628 /* Set up the SRRCTL register */
1629 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1630 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1631 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1633 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1634 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
/* Tail is preloaded to num_rx_desc-1: the whole ring is given to HW. */
1636 /* Setup the HW Rx Head and Tail Descriptor Pointers */
1637 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1638 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1639 adapter->num_rx_desc - 1);
1640 /* Set the processing limit */
1641 rxr->process_limit = ixv_rx_process_limit;
1643 /* Set Rx Tail register */
1644 rxr->tail = IXGBE_VFRDT(rxr->me);
1646 /* Do the queue enabling last */
1647 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1648 rxdctl |= IXGBE_RXDCTL_ENABLE;
1649 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll (bounded to 10 tries) for the ENABLE bit to stick. */
1650 for (int k = 0; k < 10; k++) {
1651 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1652 IXGBE_RXDCTL_ENABLE)
1660 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1662 if (ifp->if_capenable & IFCAP_RXCSUM)
1663 rxcsum |= IXGBE_RXCSUM_PCSD;
1665 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1666 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1668 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
/*
 * ixv_setup_vlan_support - after a soft reset, re-enable VLAN
 * stripping per queue and repopulate the hardware VFTA from the
 * software shadow (each set bit in ixv_shadow_vfta[] encodes one
 * registered VLAN id).
 */
1674 ixv_setup_vlan_support(struct adapter *adapter)
1676 struct ixgbe_hw *hw = &adapter->hw;
1677 u32 ctrl, vid, vfta, retry;
1681 ** We get here thru init_locked, meaning
1682 ** a soft reset, this has already cleared
1683 ** the VFTA and other state, so if there
1684 ** have been no vlan's registered do nothing.
1686 if (adapter->num_vlans == 0)
1689 /* Enable the queues */
1690 for (int i = 0; i < adapter->num_queues; i++) {
1691 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1692 ctrl |= IXGBE_RXDCTL_VME;
1693 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1697 ** A soft reset zero's out the VFTA, so
1698 ** we need to repopulate it now.
1700 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1701 if (ixv_shadow_vfta[i] == 0)
1703 vfta = ixv_shadow_vfta[i];
1705 ** Reconstruct the vlan id's
1706 ** based on the bits set in each
1707 ** of the array ints.
1709 for ( int j = 0; j < 32; j++) {
1711 if ((vfta & (1 << j)) == 0)
1714 /* Call the shared code mailbox routine */
/* Mailbox call may need retries; loops until the PF acks the set. */
1715 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1724 ** This routine is run via an vlan config EVENT,
1725 ** it enables us to use the HW Filter table since
1726 ** we can get the vlan id. This just creates the
1727 ** entry in the soft version of the VFTA, init will
1728 ** repopulate the real table.
1731 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1733 struct adapter *adapter = ifp->if_softc;
/* Event is broadcast to all interfaces; ignore other drivers' events. */
1736 if (ifp->if_softc != arg) /* Not our event */
1739 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1742 IXGBE_CORE_LOCK(adapter);
/* vtag maps to word (vtag >> 5) and a bit within it in the shadow VFTA. */
1743 index = (vtag >> 5) & 0x7F;
1745 ixv_shadow_vfta[index] |= (1 << bit);
1746 ++adapter->num_vlans;
1747 /* Re-init to load the changes */
1748 ixv_init_locked(adapter);
1749 IXGBE_CORE_UNLOCK(adapter);
1753 ** This routine is run via an vlan
1754 ** unconfig EVENT, remove our entry
1755 ** in the soft vfta.
1758 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1760 struct adapter *adapter = ifp->if_softc;
/* Mirror of ixv_register_vlan: clear the shadow bit and re-init. */
1763 if (ifp->if_softc != arg)
1766 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1769 IXGBE_CORE_LOCK(adapter);
1770 index = (vtag >> 5) & 0x7F;
1772 ixv_shadow_vfta[index] &= ~(1 << bit);
1773 --adapter->num_vlans;
1774 /* Re-init to load the changes */
1775 ixv_init_locked(adapter);
1776 IXGBE_CORE_UNLOCK(adapter);
/*
 * ixv_enable_intr - unmask the mailbox interrupt, set autoclear for
 * the queue causes, then enable each queue's vector; flushed with a
 * posted-write read at the end.
 */
1780 ixv_enable_intr(struct adapter *adapter)
1782 struct ixgbe_hw *hw = &adapter->hw;
1783 struct ix_queue *que = adapter->queues;
1784 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1787 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Autoclear everything except OTHER/LSC, which stay level-handled. */
1789 mask = IXGBE_EIMS_ENABLE_MASK;
1790 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1791 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1793 for (int i = 0; i < adapter->num_queues; i++, que++)
1794 ixv_enable_queue(adapter, que->msix);
1796 IXGBE_WRITE_FLUSH(hw);
/* ixv_disable_intr - mask every VF interrupt cause and flush. */
1802 ixv_disable_intr(struct adapter *adapter)
1804 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1805 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1806 IXGBE_WRITE_FLUSH(&adapter->hw);
1811 ** Setup the correct IVAR register for a particular MSIX interrupt
1812 ** - entry is the register array entry
1813 ** - vector is the MSIX vector for this queue
1814 ** - type is RX/TX/MISC
1817 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1819 struct ixgbe_hw *hw = &adapter->hw;
/* HW requires the valid bit set alongside the vector number. */
1822 vector |= IXGBE_IVAR_ALLOC_VAL;
1824 if (type == -1) { /* MISC IVAR */
1825 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1828 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1829 } else { /* RX/TX IVARS */
/*
 * Each 32-bit VTIVAR register packs four 8-bit entries: two
 * queues (entry&1 selects the half) x RX/TX (type selects
 * the byte within the half).
 */
1830 index = (16 * (entry & 1)) + (8 * type);
1831 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1832 ivar &= ~(0xFF << index);
1833 ivar |= (vector << index);
1834 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
/*
 * ixv_configure_ivars - route each queue's RX (type 0) and TX
 * (type 1) causes to its MSIX vector, seed EITR, then map the
 * mailbox cause (type -1) to the admin vector.
 */
1839 ixv_configure_ivars(struct adapter *adapter)
1841 struct ix_queue *que = adapter->queues;
1843 for (int i = 0; i < adapter->num_queues; i++, que++) {
1844 /* First the RX queue entry */
1845 ixv_set_ivar(adapter, i, que->msix, 0);
1846 /* ... and the TX */
1847 ixv_set_ivar(adapter, i, que->msix, 1);
1848 /* Set an initial value in EITR */
1849 IXGBE_WRITE_REG(&adapter->hw,
1850 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1853 /* For the mailbox interrupt */
1854 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1859 ** Tasklet handler for MSIX MBX interrupts
1860 ** - do outside interrupt since it might sleep
1863 ixv_handle_mbx(void *context, int pending)
1865 struct adapter *adapter = context;
/* Refresh link speed/state from the PF, then propagate to the stack. */
1867 ixgbe_check_link(&adapter->hw,
1868 &adapter->link_speed, &adapter->link_up, 0);
1869 ixv_update_link_status(adapter);
1873 ** The VF stats registers never have a truely virgin
1874 ** starting point, so this routine tries to make an
1875 ** artificial one, marking ground zero on attach as
1879 ixv_save_stats(struct adapter *adapter)
/* Accumulate the delta since the last baseline into the saved_reset_* totals. */
1881 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1882 adapter->stats.vf.saved_reset_vfgprc +=
1883 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1884 adapter->stats.vf.saved_reset_vfgptc +=
1885 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1886 adapter->stats.vf.saved_reset_vfgorc +=
1887 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1888 adapter->stats.vf.saved_reset_vfgotc +=
1889 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1890 adapter->stats.vf.saved_reset_vfmprc +=
1891 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
/*
 * ixv_init_stats - snapshot the current HW counter values as both
 * the "last seen" values (for wraparound tracking) and the baseline
 * that deltas are measured from.  The octet counters are 36 bits,
 * split across LSB/MSB registers.
 */
1896 ixv_init_stats(struct adapter *adapter)
1898 struct ixgbe_hw *hw = &adapter->hw;
1900 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1901 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1902 adapter->stats.vf.last_vfgorc |=
1903 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1905 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1906 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1907 adapter->stats.vf.last_vfgotc |=
1908 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1910 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1912 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1913 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1914 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1915 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1916 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
/*
 * Wraparound-aware counter accumulation: the HW counters are 32 bits
 * (UPDATE_STAT_32) or 36 bits split LSB/MSB (UPDATE_STAT_36).  When
 * the current reading is below the last one the counter wrapped, so
 * add one full period before merging the new low bits into "count".
 */
1919 #define UPDATE_STAT_32(reg, last, count) \
1921 u32 current = IXGBE_READ_REG(hw, reg); \
1922 if (current < last) \
1923 count += 0x100000000LL; \
1925 count &= 0xFFFFFFFF00000000LL; \
1929 #define UPDATE_STAT_36(lsb, msb, last, count) \
1931 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
1932 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
1933 u64 current = ((cur_msb << 32) | cur_lsb); \
1934 if (current < last) \
1935 count += 0x1000000000LL; \
1937 count &= 0xFFFFFFF000000000LL; \
1942 ** ixv_update_stats - Update the board statistics counters.
1945 ixv_update_stats(struct adapter *adapter)
1947 struct ixgbe_hw *hw = &adapter->hw;
/* Fold each HW counter into its 64-bit software mirror (see macros above). */
1949 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1950 adapter->stats.vf.vfgprc);
1951 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1952 adapter->stats.vf.vfgptc);
1953 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1954 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1955 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1956 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1957 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1958 adapter->stats.vf.vfmprc);
1962 * Add statistic sysctls for the VF.
/*
 * Exposes driver counters, MAC counters (under "mac"), and per-queue
 * software counters (under "que") in this device's sysctl tree.
 * Only queue 0 is published — matches the single-queue VF setup.
 */
1965 ixv_add_stats_sysctls(struct adapter *adapter)
1967 device_t dev = adapter->dev;
1968 struct ix_queue *que = &adapter->queues[0];
1969 struct tx_ring *txr = que->txr;
1970 struct rx_ring *rxr = que->rxr;
1972 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1973 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1974 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1975 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1977 struct sysctl_oid *stat_node, *queue_node;
1978 struct sysctl_oid_list *stat_list, *queue_list;
1980 /* Driver Statistics */
1981 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
1982 CTLFLAG_RD, &adapter->dropped_pkts,
1983 "Driver dropped packets");
1984 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
1985 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
1986 "m_defrag() failed");
1987 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1988 CTLFLAG_RD, &adapter->watchdog_events,
1989 "Watchdog timeouts");
1991 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1993 "VF Statistics (read from HW registers)");
1994 stat_list = SYSCTL_CHILDREN(stat_node);
1996 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1997 CTLFLAG_RD, &stats->vfgprc,
1998 "Good Packets Received");
1999 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2000 CTLFLAG_RD, &stats->vfgorc,
2001 "Good Octets Received");
2002 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2003 CTLFLAG_RD, &stats->vfmprc,
2004 "Multicast Packets Received");
2005 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2006 CTLFLAG_RD, &stats->vfgptc,
2007 "Good Packets Transmitted");
2008 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2009 CTLFLAG_RD, &stats->vfgotc,
2010 "Good Octets Transmitted");
2012 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
2014 "Queue Statistics (collected by SW)");
2015 queue_list = SYSCTL_CHILDREN(queue_node);
2017 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2018 CTLFLAG_RD, &(que->irqs),
2020 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
2021 CTLFLAG_RD, &(rxr->rx_irq),
2022 "RX irqs on queue");
2023 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2024 CTLFLAG_RD, &(rxr->rx_packets),
2026 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2027 CTLFLAG_RD, &(rxr->rx_bytes),
2029 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2030 CTLFLAG_RD, &(rxr->rx_discarded),
2031 "Discarded RX packets");
2033 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2034 CTLFLAG_RD, &(txr->total_packets),
/*
 * NOTE(review): tx_bytes uses SYSCTL_ADD_UINT while every sibling
 * counter here is SYSCTL_ADD_UQUAD.  If txr->bytes is a 64-bit
 * counter like the others, this exports the wrong width — confirm
 * the field type and switch to UQUAD in a follow-up.
 */
2036 SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
2037 CTLFLAG_RD, &(txr->bytes), 0,
2039 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2040 CTLFLAG_RD, &(txr->no_desc_avail),
2041 "# of times not enough descriptors were available during TX");
2044 /**********************************************************************
2046 * This routine is called only when em_display_debug_stats is enabled.
2047 * This routine provides a way to take a look at important statistics
2048 * maintained by the driver and hardware.
2050 **********************************************************************/
/*
 * Dumps per-queue IRQ/RX/TX/LRO counters plus the mailbox IRQ count
 * to the console via device_printf.  Debug aid only — no state changes.
 */
2052 ixv_print_debug_info(struct adapter *adapter)
2054 device_t dev = adapter->dev;
2055 struct ixgbe_hw *hw = &adapter->hw;
2056 struct ix_queue *que = adapter->queues;
2057 struct rx_ring *rxr;
2058 struct tx_ring *txr;
2059 struct lro_ctrl *lro;
2061 device_printf(dev,"Error Byte Count = %u \n",
2062 IXGBE_READ_REG(hw, IXGBE_ERRBC));
/* rxr/txr/lro are picked up from each queue inside the loop. */
2064 for (int i = 0; i < adapter->num_queues; i++, que++) {
2068 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2069 que->msix, (long)que->irqs);
2070 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2071 rxr->me, (long long)rxr->rx_packets);
2072 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2073 rxr->me, (long)rxr->rx_bytes);
2074 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2075 rxr->me, lro->lro_queued);
2076 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2077 rxr->me, lro->lro_flushed);
2078 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2079 txr->me, (long)txr->total_packets);
2080 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2081 txr->me, (long)txr->no_desc_avail);
2084 device_printf(dev,"MBX IRQ Handled: %lu\n",
2085 (long)adapter->link_irq);
2090 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2093 struct adapter *adapter;
2096 error = sysctl_handle_int(oidp, &result, 0, req);
2098 if (error || !req->newptr)
2102 adapter = (struct adapter *) arg1;
2103 ixv_print_debug_info(adapter);