1 /******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #ifndef IXGBE_STANDALONE_BUILD
38 #include "opt_inet6.h"
43 /*********************************************************************
45 *********************************************************************/
46 char ixv_driver_version[] = "1.4.0";
48 /*********************************************************************
51 * Used by probe to select devices to load on
52 * Last field stores an index into ixv_strings
53 * Last entry must be all 0s
55 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56 *********************************************************************/
/*
 * PCI vendor/device ID match table consumed by ixv_probe().  The last field
 * of each entry is an index into ixv_strings[].
 * NOTE(review): this listing is truncated — the opening brace and the
 * required all-zeros terminator entry are not visible here; the real table
 * must end with a zeroed sentinel so the ixv_probe() scan loop stops.
 */
58 static ixgbe_vendor_info_t ixv_vendor_info_array[] =
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
64 /* required last entry */
68 /*********************************************************************
69 * Table of branding strings
70 *********************************************************************/
/* Device description strings, selected by ixv_vendor_info_array[].index. */
72 static char *ixv_strings[] = {
73 "Intel(R) PRO/10GbE Virtual Function Network Driver"
/* NOTE(review): the closing "};" is not visible in this truncated listing. */
76 /*********************************************************************
/* Forward declarations for all driver entry points and local helpers. */
78 *********************************************************************/
/* newbus device interface methods */
79 static int ixv_probe(device_t);
80 static int ixv_attach(device_t);
81 static int ixv_detach(device_t);
82 static int ixv_shutdown(device_t);
/* ifnet entry points and init/stop paths */
83 static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
84 static void ixv_init(void *);
85 static void ixv_init_locked(struct adapter *);
86 static void ixv_stop(void *);
87 static void ixv_media_status(struct ifnet *, struct ifmediareq *);
88 static int ixv_media_change(struct ifnet *);
/* hardware bring-up helpers */
89 static void ixv_identify_hardware(struct adapter *);
90 static int ixv_allocate_pci_resources(struct adapter *);
91 static int ixv_allocate_msix(struct adapter *);
92 static int ixv_setup_msix(struct adapter *);
93 static void ixv_free_pci_resources(struct adapter *);
94 static void ixv_local_timer(void *);
95 static void ixv_setup_interface(device_t, struct adapter *);
96 static void ixv_config_link(struct adapter *);
98 static void ixv_initialize_transmit_units(struct adapter *);
99 static void ixv_initialize_receive_units(struct adapter *);
/* interrupt/multicast/link management */
101 static void ixv_enable_intr(struct adapter *);
102 static void ixv_disable_intr(struct adapter *);
103 static void ixv_set_multi(struct adapter *);
104 static void ixv_update_link_status(struct adapter *);
105 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
107 static void ixv_configure_ivars(struct adapter *);
108 static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* VLAN hardware filter support */
110 static void ixv_setup_vlan_support(struct adapter *);
111 static void ixv_register_vlan(void *, struct ifnet *, u16);
112 static void ixv_unregister_vlan(void *, struct ifnet *, u16);
/* statistics */
114 static void ixv_save_stats(struct adapter *);
115 static void ixv_init_stats(struct adapter *);
116 static void ixv_update_stats(struct adapter *);
117 static void ixv_add_stats_sysctls(struct adapter *);
119 /* The MSI/X Interrupt handlers */
120 static void ixv_msix_que(void *);
121 static void ixv_msix_mbx(void *);
123 /* Deferred interrupt tasklets */
124 static void ixv_handle_que(void *, int);
125 static void ixv_handle_mbx(void *, int);
127 /*********************************************************************
128 * FreeBSD Device Interface Entry Points
129 *********************************************************************/
131 static device_method_t ixv_methods[] = {
132 /* Device interface */
133 DEVMETHOD(device_probe, ixv_probe),
134 DEVMETHOD(device_attach, ixv_attach),
135 DEVMETHOD(device_detach, ixv_detach),
136 DEVMETHOD(device_shutdown, ixv_shutdown),
/* NOTE(review): the DEVMETHOD_END terminator and closing "};" are not
 * visible in this truncated listing. */
140 static driver_t ixv_driver = {
141 "ixv", ixv_methods, sizeof(struct adapter),
/* newbus registration: attach the ixv driver to the pci bus. */
144 devclass_t ixv_devclass;
145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
146 MODULE_DEPEND(ixv, pci, 1, 1, 1);
147 MODULE_DEPEND(ixv, ether, 1, 1, 1);
148 /* XXX depend on 'ix' ? */
151 ** TUNABLE PARAMETERS:
154 /* Number of Queues - do not exceed MSIX vectors - 1 */
155 static int ixv_num_queues = 1;
156 TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
159 ** AIM: Adaptive Interrupt Moderation
160 ** which means that the interrupt rate
161 ** is varied over time based on the
162 ** traffic for that interrupt vector
164 static int ixv_enable_aim = FALSE;
165 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
167 /* How many packets rxeof tries to clean at a time */
168 static int ixv_rx_process_limit = 256;
169 TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
171 /* How many packets txeof tries to clean at a time */
172 static int ixv_tx_process_limit = 256;
173 TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
175 /* Flow control setting, default to full */
176 static int ixv_flow_control = ixgbe_fc_full;
177 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
180 * Header split: this causes the hardware to DMA
181 * the header into a separate mbuf from the payload,
182 * it can be a performance win in some workloads, but
183 * in others it actually hurts, it's off by default.
185 static int ixv_header_split = FALSE;
186 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
189 ** Number of TX descriptors per ring,
190 ** setting higher than RX as this seems
191 ** the better performing choice.
193 static int ixv_txd = DEFAULT_TXD;
194 TUNABLE_INT("hw.ixv.txd", &ixv_txd);
196 /* Number of RX descriptors per ring */
197 static int ixv_rxd = DEFAULT_RXD;
198 TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
201 ** Shadow VFTA table, this is needed because
202 ** the real filter table gets cleared during
203 ** a soft reset and we need to repopulate it.
205 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
207 /*********************************************************************
208 * Device identification routine
210 * ixv_probe determines if the driver should be loaded on
211 * adapter based on PCI vendor/device id of the adapter.
213 * return BUS_PROBE_DEFAULT on success, positive on failure
214 *********************************************************************/
/* NOTE(review): the "static int" storage line, opening brace, and the
 * trailing "return (ENXIO);" failure path are not visible in this
 * truncated listing. */
217 ixv_probe(device_t dev)
219 ixgbe_vendor_info_t *ent;
221 u16 pci_vendor_id = 0;
222 u16 pci_device_id = 0;
223 u16 pci_subvendor_id = 0;
224 u16 pci_subdevice_id = 0;
225 char adapter_name[256];
/* Reject non-Intel devices immediately. */
228 pci_vendor_id = pci_get_vendor(dev);
229 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
232 pci_device_id = pci_get_device(dev);
233 pci_subvendor_id = pci_get_subvendor(dev);
234 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table until the all-zeros sentinel entry. */
236 ent = ixv_vendor_info_array;
237 while (ent->vendor_id != 0) {
238 if ((pci_vendor_id == ent->vendor_id) &&
239 (pci_device_id == ent->device_id) &&
/* subvendor/subdevice of 0 in the table acts as a wildcard */
241 ((pci_subvendor_id == ent->subvendor_id) ||
242 (ent->subvendor_id == 0)) &&
244 ((pci_subdevice_id == ent->subdevice_id) ||
245 (ent->subdevice_id == 0))) {
246 sprintf(adapter_name, "%s, Version - %s",
247 ixv_strings[ent->index],
/* NOTE(review): the ixv_driver_version argument line is not visible here. */
249 device_set_desc_copy(dev, adapter_name);
250 return (BUS_PROBE_DEFAULT);
257 /*********************************************************************
258 * Device initialization routine
260 * The attach entry point is called when the driver is being loaded.
261 * This routine identifies the type of hardware, allocates all resources
262 * and initializes the hardware.
264 * return 0 on success, positive on failure
265 *********************************************************************/
/* NOTE(review): this listing is truncated — the "static int" line, opening
 * brace, several locals (hw, error), and the error-path labels that the
 * cleanup code at the bottom belongs to are not visible here. */
268 ixv_attach(device_t dev)
270 struct adapter *adapter;
274 INIT_DEBUGOUT("ixv_attach: begin");
276 /* Allocate, clear, and link in our adapter structure */
277 adapter = device_get_softc(dev);
278 adapter->dev = adapter->osdep.dev = dev;
/* Core mutex protects init/stop/ioctl paths against the timer callout. */
282 IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctls: debug dump hook and AIM toggle. */
285 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
286 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
287 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
288 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
290 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
291 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
292 OID_AUTO, "enable_aim", CTLFLAG_RW,
293 &ixv_enable_aim, 1, "Interrupt Moderation");
295 /* Set up the timer callout */
296 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
298 /* Determine hardware revision */
299 ixv_identify_hardware(adapter);
301 /* Do base PCI setup - map BAR0 */
302 if (ixv_allocate_pci_resources(adapter)) {
303 device_printf(dev, "Allocation of PCI resources failed\n");
/* NOTE(review): the "goto err_out" style bail-out line is not visible. */
308 /* Do descriptor calc and sanity checks */
/* Descriptor counts must keep the rings DBA_ALIGN-aligned and in range;
 * otherwise fall back to the compile-time defaults. */
309 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
310 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
311 device_printf(dev, "TXD config issue, using default!\n");
312 adapter->num_tx_desc = DEFAULT_TXD;
314 adapter->num_tx_desc = ixv_txd;
316 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
317 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
318 device_printf(dev, "RXD config issue, using default!\n");
319 adapter->num_rx_desc = DEFAULT_RXD;
321 adapter->num_rx_desc = ixv_rxd;
323 /* Allocate our TX/RX Queues */
324 if (ixgbe_allocate_queues(adapter)) {
330 ** Initialize the shared code: its
331 ** at this point the mac type is set.
333 error = ixgbe_init_shared_code(hw);
335 device_printf(dev,"Shared Code Initialization Failure\n");
340 /* Setup the mailbox */
341 ixgbe_init_mbx_params_vf(hw);
345 /* Get the Mailbox API version */
346 device_printf(dev,"MBX API %d negotiation: %d\n",
348 ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));
350 error = ixgbe_init_hw(hw);
352 device_printf(dev,"Hardware Initialization Failure\n");
357 error = ixv_allocate_msix(adapter);
361 /* If no mac address was assigned, make a random one */
362 if (!ixv_check_ether_addr(hw->mac.addr)) {
363 u8 addr[ETHER_ADDR_LEN];
364 arc4rand(&addr, sizeof(addr), 0);
/* NOTE(review): lines that clear the multicast bit / set the locally
 * administered bit on the random address are not visible here. */
367 bcopy(addr, hw->mac.addr, sizeof(addr));
370 /* Setup OS specific network interface */
371 ixv_setup_interface(dev, adapter);
373 /* Do the stats setup */
374 ixv_save_stats(adapter);
375 ixv_init_stats(adapter);
376 ixv_add_stats_sysctls(adapter);
378 /* Register for VLAN events */
379 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
380 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
381 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
382 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
384 INIT_DEBUGOUT("ixv_attach: end");
/* Error unwind: free whatever was allocated before the failure. */
388 ixgbe_free_transmit_structures(adapter);
389 ixgbe_free_receive_structures(adapter);
391 ixv_free_pci_resources(adapter);
396 /*********************************************************************
397 * Device removal routine
399 * The detach entry point is called when the driver is being removed.
400 * This routine stops the adapter and deallocates all the resources
401 * that were allocated for driver operation.
403 * return 0 on success, positive on failure
404 *********************************************************************/
/* NOTE(review): the "static int" line, opening brace, the EBUSY return for
 * the VLAN-in-use case, the ixv_stop() call under the core lock, and the
 * final "return (0);" are not visible in this truncated listing. */
407 ixv_detach(device_t dev)
409 struct adapter *adapter = device_get_softc(dev);
410 struct ix_queue *que = adapter->queues;
412 INIT_DEBUGOUT("ixv_detach: begin");
414 /* Make sure VLANS are not using driver */
415 if (adapter->ifp->if_vlantrunk != NULL) {
416 device_printf(dev,"Vlan in use, detach first\n");
/* Stop the adapter (core lock held) before tearing anything down. */
420 IXGBE_CORE_LOCK(adapter);
422 IXGBE_CORE_UNLOCK(adapter);
/* Drain and free the per-queue taskqueues. */
424 for (int i = 0; i < adapter->num_queues; i++, que++) {
426 struct tx_ring *txr = que->txr;
427 taskqueue_drain(que->tq, &txr->txq_task);
428 taskqueue_drain(que->tq, &que->que_task);
429 taskqueue_free(que->tq);
433 /* Drain the Mailbox(link) queue */
435 taskqueue_drain(adapter->tq, &adapter->link_task);
436 taskqueue_free(adapter->tq);
439 /* Unregister VLAN events */
440 if (adapter->vlan_attach != NULL)
441 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
442 if (adapter->vlan_detach != NULL)
443 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
/* Detach from the network stack, then release hardware resources. */
445 ether_ifdetach(adapter->ifp);
446 callout_drain(&adapter->timer);
447 ixv_free_pci_resources(adapter);
448 bus_generic_detach(dev);
449 if_free(adapter->ifp);
451 ixgbe_free_transmit_structures(adapter);
452 ixgbe_free_receive_structures(adapter);
454 IXGBE_CORE_LOCK_DESTROY(adapter);
458 /*********************************************************************
460 * Shutdown entry point
462 **********************************************************************/
/* Stop the adapter under the core lock at system shutdown.
 * NOTE(review): the "static int" line, the ixv_stop() call between the
 * lock/unlock pair, and the "return (0);" are not visible in this
 * truncated listing. */
464 ixv_shutdown(device_t dev)
466 struct adapter *adapter = device_get_softc(dev);
467 IXGBE_CORE_LOCK(adapter);
469 IXGBE_CORE_UNLOCK(adapter);
474 /*********************************************************************
477 * ixv_ioctl is called when the user wants to configure the
480 * return 0 on success, positive on failure
481 **********************************************************************/
/* NOTE(review): this listing is truncated — the "static int" line, the
 * switch statement itself, and all "case SIOCxxx:" labels are missing;
 * the bodies below correspond to SIOCSIFADDR, SIOCSIFMTU, SIOCSIFFLAGS,
 * SIOC(ADD|DEL)MULTI, SIOCxIFMEDIA, SIOCSIFCAP and the default case. */
484 ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
486 struct adapter *adapter = ifp->if_softc;
487 struct ifreq *ifr = (struct ifreq *) data;
488 #if defined(INET) || defined(INET6)
489 struct ifaddr *ifa = (struct ifaddr *) data;
490 bool avoid_reset = FALSE;
/* SIOCSIFADDR: only avoid a full reinit for INET/INET6 addresses. */
498 if (ifa->ifa_addr->sa_family == AF_INET)
502 if (ifa->ifa_addr->sa_family == AF_INET6)
505 #if defined(INET) || defined(INET6)
507 ** Calling init results in link renegotiation,
508 ** so we avoid doing it when possible.
511 ifp->if_flags |= IFF_UP;
512 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
514 if (!(ifp->if_flags & IFF_NOARP))
515 arp_ifinit(ifp, ifa);
517 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate, update max frame size, reinit under the lock. */
521 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
522 if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
525 IXGBE_CORE_LOCK(adapter);
526 ifp->if_mtu = ifr->ifr_mtu;
527 adapter->max_frame_size =
528 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
529 ixv_init_locked(adapter);
530 IXGBE_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: bring the interface up or down to match IFF_UP. */
534 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
535 IXGBE_CORE_LOCK(adapter);
536 if (ifp->if_flags & IFF_UP) {
537 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
538 ixv_init_locked(adapter);
540 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
542 adapter->if_flags = ifp->if_flags;
543 IXGBE_CORE_UNLOCK(adapter);
/* SIOC(ADD|DEL)MULTI: reload the multicast filter with intrs masked. */
547 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
548 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
549 IXGBE_CORE_LOCK(adapter);
550 ixv_disable_intr(adapter);
551 ixv_set_multi(adapter);
552 ixv_enable_intr(adapter);
553 IXGBE_CORE_UNLOCK(adapter);
/* SIOCxIFMEDIA: delegate to ifmedia. */
558 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
559 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle offload capabilities, reinit if running. */
563 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
564 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
565 if (mask & IFCAP_HWCSUM)
566 ifp->if_capenable ^= IFCAP_HWCSUM;
567 if (mask & IFCAP_TSO4)
568 ifp->if_capenable ^= IFCAP_TSO4;
569 if (mask & IFCAP_LRO)
570 ifp->if_capenable ^= IFCAP_LRO;
571 if (mask & IFCAP_VLAN_HWTAGGING)
572 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
573 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
574 IXGBE_CORE_LOCK(adapter);
575 ixv_init_locked(adapter);
576 IXGBE_CORE_UNLOCK(adapter);
578 VLAN_CAPABILITIES(ifp);
/* default: anything else goes to the generic ethernet ioctl handler. */
583 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
584 error = ether_ioctl(ifp, command, data);
591 /*********************************************************************
594 * This routine is used in two ways. It is used by the stack as
595 * init entry point in network interface structure. It is also used
596 * by the driver as a hw/sw initialization routine to get to a
599 * return 0 on success, positive on failure
600 **********************************************************************/
601 #define IXGBE_MHADD_MFS_SHIFT 16
/* Full (re)initialization of the VF: caller must hold the core lock.
 * NOTE(review): the "static void" line, opening brace, and locals such as
 * gpie/mhadd declarations are not visible in this truncated listing. */
604 ixv_init_locked(struct adapter *adapter)
606 struct ifnet *ifp = adapter->ifp;
607 device_t dev = adapter->dev;
608 struct ixgbe_hw *hw = &adapter->hw;
611 INIT_DEBUGOUT("ixv_init: begin");
612 mtx_assert(&adapter->core_mtx, MA_OWNED);
613 hw->adapter_stopped = FALSE;
614 ixgbe_stop_adapter(hw);
615 callout_stop(&adapter->timer);
617 /* reprogram the RAR[0] in case user changed it. */
618 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
620 /* Get the latest mac address, User can use a LAA */
621 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
622 IXGBE_ETH_LENGTH_OF_ADDRESS);
623 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
624 hw->addr_ctrl.rar_used_count = 1;
626 /* Prepare transmit descriptors and buffers */
627 if (ixgbe_setup_transmit_structures(adapter)) {
628 device_printf(dev,"Could not setup transmit structures\n");
/* NOTE(review): the ixv_stop()/return bail-out lines are not visible. */
634 ixv_initialize_transmit_units(adapter);
636 /* Setup Multicast table */
637 ixv_set_multi(adapter);
640 ** Determine the correct mbuf pool
641 ** for doing jumbo/headersplit
643 if (ifp->if_mtu > ETHERMTU)
644 adapter->rx_mbuf_sz = MJUMPAGESIZE;
646 adapter->rx_mbuf_sz = MCLBYTES;
648 /* Prepare receive descriptors and buffers */
649 if (ixgbe_setup_receive_structures(adapter)) {
650 device_printf(dev,"Could not setup receive structures\n");
655 /* Configure RX settings */
656 ixv_initialize_receive_units(adapter);
658 /* Enable Enhanced MSIX mode */
659 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
660 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
661 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
662 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
664 /* Set the various hardware offload abilities */
665 ifp->if_hwassist = 0;
666 if (ifp->if_capenable & IFCAP_TSO4)
667 ifp->if_hwassist |= CSUM_TSO;
668 if (ifp->if_capenable & IFCAP_TXCSUM) {
669 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
670 #if __FreeBSD_version >= 800000
671 ifp->if_hwassist |= CSUM_SCTP;
/* Program the max frame size register for jumbo frames. */
676 if (ifp->if_mtu > ETHERMTU) {
677 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
678 mhadd &= ~IXGBE_MHADD_MFS_MASK;
679 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
680 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
683 /* Set up VLAN offload and filter */
684 ixv_setup_vlan_support(adapter);
/* Restart the 1 Hz watchdog/stats timer. */
686 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
688 /* Set up MSI/X routing */
689 ixv_configure_ivars(adapter);
691 /* Set up auto-mask */
692 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
694 /* Set moderation on the Link interrupt */
695 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
/* Reset the base statistics snapshot. */
698 ixv_init_stats(adapter);
700 /* Config/Enable Link */
701 ixv_config_link(adapter);
703 /* And now turn on interrupts */
704 ixv_enable_intr(adapter);
706 /* Now inform the stack we're ready */
707 ifp->if_drv_flags |= IFF_DRV_RUNNING;
708 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* ixv_init: unlocked if_init entry point — takes the core lock and calls
 * ixv_init_locked().  NOTE(review): the "static void ixv_init(void *arg)"
 * signature line is not visible in this truncated listing. */
716 struct adapter *adapter = arg;
718 IXGBE_CORE_LOCK(adapter);
719 ixv_init_locked(adapter);
720 IXGBE_CORE_UNLOCK(adapter);
727 ** MSIX Interrupt Handlers and Tasklets
/* Unmask the MSI-X interrupt for one queue vector via VTEIMS. */
732 ixv_enable_queue(struct adapter *adapter, u32 vector)
734 struct ixgbe_hw *hw = &adapter->hw;
735 u32 queue = 1 << vector;
738 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
739 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Mask the MSI-X interrupt for one queue vector via VTEIMC. */
743 ixv_disable_queue(struct adapter *adapter, u32 vector)
745 struct ixgbe_hw *hw = &adapter->hw;
746 u64 queue = (u64)(1 << vector);
749 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
750 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
/* Trigger a software interrupt (VTEICS) on every queue in the bitmask,
 * used by the watchdog to force service of queues with pending work. */
754 ixv_rearm_queues(struct adapter *adapter, u64 queues)
756 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
757 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
/* Deferred (taskqueue) service routine for one RX/TX queue pair.
 * Cleans RX, restarts TX, and requeues itself while work remains;
 * re-enables the queue interrupt when done. */
762 ixv_handle_que(void *context, int pending)
764 struct ix_queue *que = context;
765 struct adapter *adapter = que->adapter;
766 struct tx_ring *txr = que->txr;
767 struct ifnet *ifp = adapter->ifp;
770 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
771 more = ixgbe_rxeof(que);
/* NOTE(review): the IXGBE_TX_LOCK/ixgbe_txeof lines are not visible in
 * this truncated listing. */
774 #if __FreeBSD_version >= 800000
775 if (!drbr_empty(ifp, txr->br))
776 ixgbe_mq_start_locked(ifp, txr);
778 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
779 ixgbe_start_locked(txr, ifp);
781 IXGBE_TX_UNLOCK(txr);
/* Still more RX work: reschedule ourselves instead of re-enabling. */
783 taskqueue_enqueue(que->tq, &que->que_task);
788 /* Reenable this interrupt */
789 ixv_enable_queue(adapter, que->msix);
793 /*********************************************************************
795 * MSI Queue Interrupt Service routine
797 **********************************************************************/
/* Hard interrupt handler for one queue vector: masks the vector, services
 * RX/TX, optionally computes an adaptive (AIM) EITR value, then either
 * defers remaining work to the taskqueue or unmasks the vector. */
799 ixv_msix_que(void *arg)
801 struct ix_queue *que = arg;
802 struct adapter *adapter = que->adapter;
803 struct ifnet *ifp = adapter->ifp;
804 struct tx_ring *txr = que->txr;
805 struct rx_ring *rxr = que->rxr;
/* Mask this vector while we service it. */
809 ixv_disable_queue(adapter, que->msix);
812 more = ixgbe_rxeof(que);
817 ** Make certain that if the stack
818 ** has anything queued the task gets
819 ** scheduled to handle it.
821 #ifdef IXGBE_LEGACY_TX
822 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
823 ixgbe_start_locked(txr, ifp);
825 if (!drbr_empty(adapter->ifp, txr->br))
826 ixgbe_mq_start_locked(ifp, txr);
828 IXGBE_TX_UNLOCK(txr);
/* AIM disabled: skip straight to the reschedule/re-enable decision. */
832 if (ixv_enable_aim == FALSE)
835 ** Do Adaptive Interrupt Moderation:
836 ** - Write out last calculated setting
837 ** - Calculate based on average size over
838 ** the last interval.
840 if (que->eitr_setting)
841 IXGBE_WRITE_REG(&adapter->hw,
842 IXGBE_VTEITR(que->msix),
/* NOTE(review): the que->eitr_setting argument line is not visible here. */
845 que->eitr_setting = 0;
847 /* Idle, do nothing */
848 if ((txr->bytes == 0) && (rxr->bytes == 0))
/* Estimate average packet size over the last interval. */
851 if ((txr->bytes) && (txr->packets))
852 newitr = txr->bytes/txr->packets;
853 if ((rxr->bytes) && (rxr->packets))
855 (rxr->bytes / rxr->packets));
856 newitr += 24; /* account for hardware frame, crc */
858 /* set an upper boundary */
859 newitr = min(newitr, 3000);
861 /* Be nice to the mid range */
862 if ((newitr > 300) && (newitr < 1200))
863 newitr = (newitr / 3);
865 newitr = (newitr / 2);
/* NOTE(review): mask/shift lines for the EITR encoding are not visible. */
867 newitr |= newitr << 16;
869 /* save for next interrupt */
870 que->eitr_setting = newitr;
/* More work pending: defer to the taskqueue; otherwise unmask vector. */
880 taskqueue_enqueue(que->tq, &que->que_task);
881 else /* Reenable this interrupt */
882 ixv_enable_queue(adapter, que->msix);
/* Mailbox/link MSI-X vector handler: reads and acknowledges the cause,
 * defers link-status work to the link task, then unmasks the OTHER cause. */
887 ixv_msix_mbx(void *arg)
889 struct adapter *adapter = arg;
890 struct ixgbe_hw *hw = &adapter->hw;
895 /* First get the cause */
896 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
897 /* Clear interrupt with write */
898 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
900 /* Link status change */
901 if (reg & IXGBE_EICR_LSC)
902 taskqueue_enqueue(adapter->tq, &adapter->link_task);
/* Re-enable the mailbox/link interrupt. */
904 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
908 /*********************************************************************
910 * Media Ioctl callback
912 * This routine is called whenever the user queries the status of
913 * the interface using ifconfig.
915 **********************************************************************/
917 ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
919 struct adapter *adapter = ifp->if_softc;
921 INIT_DEBUGOUT("ixv_media_status: begin");
922 IXGBE_CORE_LOCK(adapter);
923 ixv_update_link_status(adapter);
925 ifmr->ifm_status = IFM_AVALID;
926 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and bail. */
928 if (!adapter->link_active) {
929 IXGBE_CORE_UNLOCK(adapter);
/* Link up: translate hardware link speed to ifmedia flags. */
933 ifmr->ifm_status |= IFM_ACTIVE;
935 switch (adapter->link_speed) {
936 case IXGBE_LINK_SPEED_1GB_FULL:
937 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
939 case IXGBE_LINK_SPEED_10GB_FULL:
940 ifmr->ifm_active |= IFM_FDX;
944 IXGBE_CORE_UNLOCK(adapter);
949 /*********************************************************************
951 * Media Ioctl callback
953 * This routine is called when the user changes speed/duplex using
954 * media/mediaopt option with ifconfig.
956 **********************************************************************/
/* VF link speed is controlled by the PF, so only IFM_AUTO is accepted. */
958 ixv_media_change(struct ifnet * ifp)
960 struct adapter *adapter = ifp->if_softc;
961 struct ifmedia *ifm = &adapter->media;
963 INIT_DEBUGOUT("ixv_media_change: begin");
965 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
968 switch (IFM_SUBTYPE(ifm->ifm_media)) {
972 device_printf(adapter->dev, "Only auto media type\n");
980 /*********************************************************************
983 * This routine is called whenever multicast address list is updated.
985 **********************************************************************/
986 #define IXGBE_RAR_ENTRIES 16
/* Gather the interface's multicast addresses into a flat byte array and
 * hand them to the shared code via the ixv_mc_array_itr iterator. */
989 ixv_set_multi(struct adapter *adapter)
991 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
993 struct ifmultiaddr *ifma;
995 struct ifnet *ifp = adapter->ifp;
997 IOCTL_DEBUGOUT("ixv_set_multi: begin");
/* Multicast list locking API differs before/after FreeBSD 8. */
999 #if __FreeBSD_version < 800000
1002 if_maddr_rlock(ifp);
1004 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1005 if (ifma->ifma_addr->sa_family != AF_LINK)
1007 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1008 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1009 IXGBE_ETH_LENGTH_OF_ADDRESS);
/* NOTE(review): the mcnt++ line is not visible in this truncated listing. */
1012 #if __FreeBSD_version < 800000
1013 IF_ADDR_UNLOCK(ifp);
1015 if_maddr_runlock(ifp);
1020 ixgbe_update_mc_addr_list(&adapter->hw,
1021 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1027 * This is an iterator function now needed by the multicast
1028 * shared code. It simply feeds the shared code routine the
1029 * addresses in the array of ixv_set_multi() one by one.
/* Advances *update_ptr by one MAC address per call and returns the current
 * address.  NOTE(review): the *vmdq assignment and "return addr;" lines are
 * not visible in this truncated listing. */
1032 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1034 u8 *addr = *update_ptr;
1038 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1039 *update_ptr = newptr;
1043 /*********************************************************************
1046 * This routine checks for link status,updates statistics,
1047 * and runs the watchdog check.
1049 **********************************************************************/
/* 1 Hz callout (core lock held via callout_init_mtx): refreshes link and
 * stats, detects hung TX queues, and either rearms queues with pending
 * work or falls through to the watchdog reset below. */
1052 ixv_local_timer(void *arg)
1054 struct adapter *adapter = arg;
1055 device_t dev = adapter->dev;
1056 struct ix_queue *que = adapter->queues;
1060 mtx_assert(&adapter->core_mtx, MA_OWNED);
1062 ixv_update_link_status(adapter);
/* Update the hardware statistics counters. */
1065 ixv_update_stats(adapter);
1068 ** Check the TX queues status
1069 ** - mark hung queues so we don't schedule on them
1070 ** - watchdog only if all queues show hung
1072 for (int i = 0; i < adapter->num_queues; i++, que++) {
1073 /* Keep track of queues with work for soft irq */
1075 queues |= ((u64)1 << que->me);
1077 ** Each time txeof runs without cleaning, but there
1078 ** are uncleaned descriptors it increments busy. If
1079 ** we get to the MAX we declare it hung.
1081 if (que->busy == IXGBE_QUEUE_HUNG) {
1083 /* Mark the queue as inactive */
1084 adapter->active_queues &= ~((u64)1 << que->me);
1087 /* Check if we've come back from hung */
1088 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1089 adapter->active_queues |= ((u64)1 << que->me);
1091 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1092 device_printf(dev,"Warning queue %d "
1093 "appears to be hung!\n", i);
1094 que->txr->busy = IXGBE_QUEUE_HUNG;
1100 /* Only truly watchdog if all queues show hung */
1101 if (hung == adapter->num_queues)
1103 else if (queues != 0) { /* Force an IRQ on queues with work */
1104 ixv_rearm_queues(adapter, queues);
1107 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
/* Watchdog path: mark interface down and reinitialize the adapter.
 * NOTE(review): the "watchdog:" label line is not visible in this
 * truncated listing. */
1111 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1112 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1113 adapter->watchdog_events++;
1114 ixv_init_locked(adapter);
1118 ** Note: this routine updates the OS on the link state
1119 ** the real check of the hardware only happens with
1120 ** a link interrupt.
/* Reports link transitions to the stack based on the cached
 * adapter->link_up/link_speed set by the mailbox/link path. */
1123 ixv_update_link_status(struct adapter *adapter)
1125 struct ifnet *ifp = adapter->ifp;
1126 device_t dev = adapter->dev;
1128 if (adapter->link_up){
1129 if (adapter->link_active == FALSE) {
/* 128 is the raw register encoding for 10 Gbps here. */
1131 device_printf(dev,"Link is up %d Gbps %s \n",
1132 ((adapter->link_speed == 128)? 10:1),
1134 adapter->link_active = TRUE;
1135 if_link_state_change(ifp, LINK_STATE_UP);
1137 } else { /* Link down */
1138 if (adapter->link_active == TRUE) {
1140 device_printf(dev,"Link is Down\n");
1141 if_link_state_change(ifp, LINK_STATE_DOWN);
1142 adapter->link_active = FALSE;
1150 /*********************************************************************
1152 * This routine disables all traffic on the adapter by issuing a
1153 * global reset on the MAC and deallocates TX/RX buffers.
1155 **********************************************************************/
/* NOTE(review): the "static void ixv_stop(void *arg)" signature line and
 * the "struct ifnet *ifp" local declaration are not visible in this
 * truncated listing. */
1161 struct adapter *adapter = arg;
1162 struct ixgbe_hw *hw = &adapter->hw;
1165 mtx_assert(&adapter->core_mtx, MA_OWNED);
1167 INIT_DEBUGOUT("ixv_stop: begin\n");
1168 ixv_disable_intr(adapter);
1170 /* Tell the stack that the interface is no longer active */
1171 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/* Clear the stopped flag first so ixgbe_stop_adapter() does a full stop. */
1174 adapter->hw.adapter_stopped = FALSE;
1175 ixgbe_stop_adapter(hw);
1176 callout_stop(&adapter->timer);
1178 /* reprogram the RAR[0] in case user changed it. */
1179 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1185 /*********************************************************************
1187 * Determine hardware revision.
1189 **********************************************************************/
/* Copies PCI config-space identity into the shared-code hw struct and
 * resolves the MAC type. */
1191 ixv_identify_hardware(struct adapter *adapter)
1193 device_t dev = adapter->dev;
1194 struct ixgbe_hw *hw = &adapter->hw;
1197 ** Make sure BUSMASTER is set, on a VM under
1198 ** KVM it may not be and will break things.
1200 pci_enable_busmaster(dev);
1202 /* Save off the information about this board */
1203 hw->vendor_id = pci_get_vendor(dev);
1204 hw->device_id = pci_get_device(dev);
1205 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1206 hw->subsystem_vendor_id =
1207 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1208 hw->subsystem_device_id =
1209 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1211 /* We need this to determine device-specific things */
1212 ixgbe_set_mac_type(hw);
1214 /* Set the right number of segments */
1215 adapter->num_segs = IXGBE_82599_SCATTER;
1220 /*********************************************************************
1222 * Setup MSIX Interrupt resources and handlers
1224 **********************************************************************/
/*
** ixv_allocate_msix: allocate and wire one IRQ resource + handler +
** fast taskqueue per RX/TX queue pair, then one more vector for the
** PF mailbox (link) interrupt.  Finishes with a QEMU workaround that
** re-asserts MSIX ENABLE on 82599 VFs.
** NOTE(review): declarations, error-return paths and braces are
** elided from this view of the file; code left byte-identical.
*/
1226 ixv_allocate_msix(struct adapter *adapter)
1228 device_t dev = adapter->dev;
1229 struct ix_queue *que = adapter->queues;
1230 struct tx_ring *txr = adapter->tx_rings;
1231 int error, rid, vector = 0;
/* One vector per queue: resource, handler, CPU binding, taskqueue */
1233 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1235 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1236 RF_SHAREABLE | RF_ACTIVE);
1237 if (que->res == NULL) {
1238 device_printf(dev,"Unable to allocate"
1239 " bus resource: que interrupt [%d]\n", vector);
1242 /* Set the handler function */
1243 error = bus_setup_intr(dev, que->res,
1244 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1245 ixv_msix_que, que, &que->tag);
1248 device_printf(dev, "Failed to register QUE handler");
1251 #if __FreeBSD_version >= 800504
1252 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
/* Record this queue's vector in the active-queue bitmap */
1255 adapter->active_queues |= (u64)(1 << que->msix);
1257 ** Bind the msix vector, and thus the
1258 ** ring to the corresponding cpu.
1260 if (adapter->num_queues > 1)
1261 bus_bind_intr(dev, que->res, i);
1262 TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1263 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1264 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1265 taskqueue_thread_enqueue, &que->tq);
1266 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1267 device_get_nameunit(adapter->dev));
/* Now the mailbox (link) interrupt — uses the last vector */
1272 adapter->res = bus_alloc_resource_any(dev,
1273 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1274 if (!adapter->res) {
1275 device_printf(dev,"Unable to allocate"
1276 " bus resource: MBX interrupt [%d]\n", rid);
1279 /* Set the mbx handler function */
1280 error = bus_setup_intr(dev, adapter->res,
1281 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1282 ixv_msix_mbx, adapter, &adapter->tag);
1284 adapter->res = NULL;
1285 device_printf(dev, "Failed to register LINK handler");
1288 #if __FreeBSD_version >= 800504
1289 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1291 adapter->vector = vector;
1292 /* Tasklets for Mailbox */
1293 TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1294 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1295 taskqueue_thread_enqueue, &adapter->tq);
1296 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1297 device_get_nameunit(adapter->dev));
1299 ** Due to a broken design QEMU will fail to properly
1300 ** enable the guest for MSIX unless the vectors in
1301 ** the table are all set up, so we must rewrite the
1302 ** ENABLE in the MSIX control register again at this
1303 ** point to cause it to successfully initialize us.
1305 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1307 pci_find_cap(dev, PCIY_MSIX, &rid);
1308 rid += PCIR_MSIX_CTRL;
1309 msix_ctrl = pci_read_config(dev, rid, 2);
1310 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1311 pci_write_config(dev, rid, msix_ctrl, 2);
1318 * Setup MSIX resources, note that the VF
1319 * device MUST use MSIX, there is no fallback.
/*
** ixv_setup_msix: map the MSIX table BAR, size the vector request to
** num_queues + 1 (mailbox), shrink num_queues if the device offers
** fewer messages, and allocate the vectors.  On any failure the MSI
** state and table mapping are released.
** NOTE(review): some lines (return type/values, braces) are elided
** from this view; code left byte-identical.
*/
1322 ixv_setup_msix(struct adapter *adapter)
1324 device_t dev = adapter->dev;
1325 int rid, want, msgs;
1328 /* Must have at least 2 MSIX vectors */
1329 msgs = pci_msix_count(dev);
1333 adapter->msix_mem = bus_alloc_resource_any(dev,
1334 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1335 if (adapter->msix_mem == NULL) {
1336 device_printf(adapter->dev,
1337 "Unable to map MSIX table \n");
1342 ** Want vectors for the queues,
1343 ** plus an additional for mailbox.
1345 want = adapter->num_queues + 1;
/* Fewer messages than wanted: cap queues, reserving one for the mailbox */
1348 adapter->num_queues = msgs - 1;
1351 if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
1352 device_printf(adapter->dev,
1353 "Using MSIX interrupts with %d vectors\n", want);
1356 /* Release in case alloc was insufficient */
1357 pci_release_msi(dev);
1359 if (adapter->msix_mem != NULL) {
1360 bus_release_resource(dev, SYS_RES_MEMORY,
1361 rid, adapter->msix_mem);
1362 adapter->msix_mem = NULL;
1364 device_printf(adapter->dev,"MSIX config error\n");
/*
** ixv_allocate_pci_resources: map BAR(0) register space, record the
** bus-space tag/handle for the shared code, pick up the tunable queue
** count, and hand off to ixv_setup_msix() for vector configuration.
** NOTE(review): some lines (return statements, braces) are elided
** from this view; code left byte-identical.
*/
1370 ixv_allocate_pci_resources(struct adapter *adapter)
1373 device_t dev = adapter->dev;
1376 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1379 if (!(adapter->pci_mem)) {
1380 device_printf(dev,"Unable to allocate bus resource: memory\n");
1384 adapter->osdep.mem_bus_space_tag =
1385 rman_get_bustag(adapter->pci_mem);
1386 adapter->osdep.mem_bus_space_handle =
1387 rman_get_bushandle(adapter->pci_mem);
/* Shared code accesses registers through this opaque handle */
1388 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1390 /* Pick up the tuneable queues */
1391 adapter->num_queues = ixv_num_queues;
1393 adapter->hw.back = &adapter->osdep;
1396 ** Now setup MSI/X, should
1397 ** return us the number of
1398 ** configured vectors.
1400 adapter->msix = ixv_setup_msix(adapter);
1401 if (adapter->msix == ENXIO)
/*
** ixv_free_pci_resources: tear down, in order, the per-queue MSIX
** interrupts, the mailbox/link interrupt, the MSI state, the MSIX
** table mapping and the BAR(0) register mapping.  Safe to call from
** a partially failed attach (bails early when adapter->res is NULL).
** NOTE(review): some lines (declarations of rid/memrid, braces) are
** elided from this view; code left byte-identical.
*/
1408 ixv_free_pci_resources(struct adapter * adapter)
1410 struct ix_queue *que = adapter->queues;
1411 device_t dev = adapter->dev;
1414 memrid = PCIR_BAR(MSIX_82598_BAR);
1417 ** There is a slight possibility of a failure mode
1418 ** in attach that will result in entering this function
1419 ** before interrupt resources have been initialized, and
1420 ** in that case we do not want to execute the loops below
1421 ** We can detect this reliably by the state of the adapter
1424 if (adapter->res == NULL)
1428 ** Release all msix queue resources:
1430 for (int i = 0; i < adapter->num_queues; i++, que++) {
1431 rid = que->msix + 1;
1432 if (que->tag != NULL) {
1433 bus_teardown_intr(dev, que->res, que->tag);
1436 if (que->res != NULL)
1437 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1441 /* Clean the Legacy or Link interrupt last */
1442 if (adapter->vector) /* we are doing MSIX */
1443 rid = adapter->vector + 1;
1445 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1447 if (adapter->tag != NULL) {
1448 bus_teardown_intr(dev, adapter->res, adapter->tag);
1449 adapter->tag = NULL;
1451 if (adapter->res != NULL)
1452 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1456 pci_release_msi(dev);
1458 if (adapter->msix_mem != NULL)
1459 bus_release_resource(dev, SYS_RES_MEMORY,
1460 memrid, adapter->msix_mem);
1462 if (adapter->pci_mem != NULL)
1463 bus_release_resource(dev, SYS_RES_MEMORY,
1464 PCIR_BAR(0), adapter->pci_mem);
1469 /*********************************************************************
1471 * Setup networking device structure and register an interface.
1473 **********************************************************************/
/*
** ixv_setup_interface: allocate and populate the ifnet, attach it to
** the ethernet layer with the HW MAC address, advertise capabilities
** (checksum, TSO4, jumbo, VLAN, LRO), and register ifmedia callbacks.
** NOTE(review): some lines (declarations, #else/#endif pairs, braces)
** are elided from this view; code left byte-identical.
*/
1475 ixv_setup_interface(device_t dev, struct adapter *adapter)
1479 INIT_DEBUGOUT("ixv_setup_interface: begin");
1481 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1483 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1484 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1485 ifp->if_baudrate = 1000000000;
1486 ifp->if_init = ixv_init;
1487 ifp->if_softc = adapter;
1488 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1489 ifp->if_ioctl = ixv_ioctl;
1490 #if __FreeBSD_version >= 800000
1491 ifp->if_transmit = ixgbe_mq_start;
1492 ifp->if_qflush = ixgbe_qflush;
1494 ifp->if_start = ixgbe_start;
1496 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1498 ether_ifattach(ifp, adapter->hw.mac.addr);
1500 adapter->max_frame_size =
1501 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1504 * Tell the upper layer(s) we support long frames.
1506 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1508 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1509 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1510 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1513 ifp->if_capabilities |= IFCAP_LRO;
/* Everything advertised is enabled by default */
1514 ifp->if_capenable = ifp->if_capabilities;
1517 * Specify the media types supported by this adapter and register
1518 * callbacks to update media and link information
1520 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1522 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1523 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1524 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
/*
** ixv_config_link: query current link state via the shared-code
** check_link op, then (per the elided control flow) invoke setup_link.
** Both ops are optional function pointers and are guarded.
** NOTE(review): lines between the two calls are elided from this
** view; code left byte-identical.
*/
1530 ixv_config_link(struct adapter *adapter)
1532 struct ixgbe_hw *hw = &adapter->hw;
1533 u32 autoneg, err = 0;
1535 if (hw->mac.ops.check_link)
1536 err = hw->mac.ops.check_link(hw, &autoneg,
1537 &adapter->link_up, FALSE);
1541 if (hw->mac.ops.setup_link)
1542 err = hw->mac.ops.setup_link(hw,
1543 autoneg, adapter->link_up);
1549 /*********************************************************************
1551 * Enable transmit unit.
1553 **********************************************************************/
/*
** ixv_initialize_transmit_units: per TX ring, program WTHRESH, the
** head/tail indices, descriptor base/length, disable relaxed write
** ordering, then set TXDCTL.ENABLE.
** NOTE(review): some lines (declarations of txdctl/txctrl, braces)
** are elided from this view; code left byte-identical.
*/
1555 ixv_initialize_transmit_units(struct adapter *adapter)
1557 struct tx_ring *txr = adapter->tx_rings;
1558 struct ixgbe_hw *hw = &adapter->hw;
1561 for (int i = 0; i < adapter->num_queues; i++, txr++) {
1562 u64 tdba = txr->txdma.dma_paddr;
1565 /* Set WTHRESH to 8, burst writeback */
1566 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1567 txdctl |= (8 << 16);
1568 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1570 /* Set the HW Tx Head and Tail indices */
1571 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
1572 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
1574 /* Set Tx Tail register */
1575 txr->tail = IXGBE_VFTDT(i);
1577 /* Set the processing limit */
1578 txr->process_limit = ixv_tx_process_limit;
1580 /* Set Ring parameters */
1581 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
1582 (tdba & 0x00000000ffffffffULL));
1583 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
1584 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
1585 adapter->num_tx_desc *
1586 sizeof(struct ixgbe_legacy_tx_desc));
/* Disable descriptor write-back relaxed ordering */
1587 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
1588 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1589 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
/* Finally enable the queue */
1592 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
1593 txdctl |= IXGBE_TXDCTL_ENABLE;
1594 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
1601 /*********************************************************************
1603 * Setup receive registers and features.
1605 **********************************************************************/
1606 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/*
** ixv_initialize_receive_units: choose a 2K/4K buffer size from the
** MTU, program packet-split type and expected frame size (mailbox to
** PF), then per RX ring: disable the queue (polling RXDCTL up to 10
** times), program base/length/head/tail and SRRCTL, and re-enable it.
** Finishes by updating RXCSUM.
** NOTE(review): declarations (rxdctl, reg, max_frame), waits and
** braces are elided from this view; code left byte-identical.
*/
1609 ixv_initialize_receive_units(struct adapter *adapter)
1611 struct rx_ring *rxr = adapter->rx_rings;
1612 struct ixgbe_hw *hw = &adapter->hw;
1613 struct ifnet *ifp = adapter->ifp;
1614 u32 bufsz, rxcsum, psrtype;
1617 if (ifp->if_mtu > ETHERMTU)
1618 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1620 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1622 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1623 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1624 IXGBE_PSRTYPE_L2HDR;
1626 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1628 /* Tell PF our expected packet-size */
1629 max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
1630 ixgbevf_rlpml_set_vf(hw, max_frame);
1632 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1633 u64 rdba = rxr->rxdma.dma_paddr;
1636 /* Disable the queue */
1637 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1638 rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
1639 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll for the disable to take effect (bounded retries) */
1640 for (int j = 0; j < 10; j++) {
1641 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1642 IXGBE_RXDCTL_ENABLE)
1648 /* Setup the Base and Length of the Rx Descriptor Ring */
1649 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
1650 (rdba & 0x00000000ffffffffULL));
1651 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
1653 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
1654 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1656 /* Reset the ring indices */
1657 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1658 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1660 /* Set up the SRRCTL register */
1661 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1662 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1663 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1665 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1666 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1668 /* Set the Tail Pointer */
1669 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1670 adapter->num_rx_desc - 1);
1672 /* Set the processing limit */
1673 rxr->process_limit = ixv_rx_process_limit;
1675 /* Capture Rx Tail index */
1676 rxr->tail = IXGBE_VFRDT(rxr->me);
1678 /* Do the queue enabling last */
1679 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1680 rxdctl |= IXGBE_RXDCTL_ENABLE;
1681 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
/* Poll for the enable to take effect (bounded retries) */
1682 for (int k = 0; k < 10; k++) {
1683 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1684 IXGBE_RXDCTL_ENABLE)
1692 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1694 if (ifp->if_capenable & IFCAP_RXCSUM)
1695 rxcsum |= IXGBE_RXCSUM_PCSD;
1697 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
1698 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1700 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
/*
** ixv_setup_vlan_support: after a soft reset (which clears the VFTA),
** re-enable VLAN mode (RXDCTL.VME) on each queue and replay every VLAN
** id recorded in the ixv_shadow_vfta bitmap via the PF mailbox.
** No-op when no VLANs are registered.
** NOTE(review): vid reconstruction and retry-limit lines are elided
** from this view; code left byte-identical.
*/
1706 ixv_setup_vlan_support(struct adapter *adapter)
1708 struct ixgbe_hw *hw = &adapter->hw;
1709 u32 ctrl, vid, vfta, retry;
1713 ** We get here thru init_locked, meaning
1714 ** a soft reset, this has already cleared
1715 ** the VFTA and other state, so if there
1716 ** have been no vlan's registered do nothing.
1718 if (adapter->num_vlans == 0)
1721 /* Enable the queues */
1722 for (int i = 0; i < adapter->num_queues; i++) {
1723 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1724 ctrl |= IXGBE_RXDCTL_VME;
1725 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1729 ** A soft reset zero's out the VFTA, so
1730 ** we need to repopulate it now.
1732 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1733 if (ixv_shadow_vfta[i] == 0)
1735 vfta = ixv_shadow_vfta[i];
1737 ** Reconstruct the vlan id's
1738 ** based on the bits set in each
1739 ** of the array ints.
1741 for ( int j = 0; j < 32; j++) {
1743 if ((vfta & (1 << j)) == 0)
1746 /* Call the shared code mailbox routine */
1747 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
1756 ** This routine is run via an vlan config EVENT,
1757 ** it enables us to use the HW Filter table since
1758 ** we can get the vlan id. This just creates the
1759 ** entry in the soft version of the VFTA, init will
1760 ** repopulate the real table.
/*
** ixv_register_vlan: vlan-config event handler.  Validates the tag
** (1..4095), sets the matching bit in the shadow VFTA under the core
** lock, bumps num_vlans, and re-inits so HW picks up the change.
** NOTE(review): the `bit` computation line is elided from this view;
** code left byte-identical.
*/
1763 ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1765 struct adapter *adapter = ifp->if_softc;
1768 if (ifp->if_softc != arg) /* Not our event */
1771 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1774 IXGBE_CORE_LOCK(adapter);
1775 index = (vtag >> 5) & 0x7F;
1777 ixv_shadow_vfta[index] |= (1 << bit);
1778 ++adapter->num_vlans;
1779 /* Re-init to load the changes */
1780 ixv_init_locked(adapter);
1781 IXGBE_CORE_UNLOCK(adapter);
1785 ** This routine is run via an vlan
1786 ** unconfig EVENT, remove our entry
1787 ** in the soft vfta.
/*
** ixv_unregister_vlan: vlan-unconfig event handler; mirror image of
** ixv_register_vlan — clears the shadow-VFTA bit, decrements
** num_vlans and re-inits under the core lock.
** NOTE(review): the `bit` computation line is elided from this view;
** code left byte-identical.
*/
1790 ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1792 struct adapter *adapter = ifp->if_softc;
1795 if (ifp->if_softc != arg)
1798 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1801 IXGBE_CORE_LOCK(adapter);
1802 index = (vtag >> 5) & 0x7F;
1804 ixv_shadow_vfta[index] &= ~(1 << bit);
1805 --adapter->num_vlans;
1806 /* Re-init to load the changes */
1807 ixv_init_locked(adapter);
1808 IXGBE_CORE_UNLOCK(adapter);
/*
** ixv_enable_intr: unmask interrupts in VTEIMS, program the
** auto-clear register (VTEIAC) for queue causes only, enable each
** queue's vector, then flush posted writes.
*/
1812 ixv_enable_intr(struct adapter *adapter)
1814 struct ixgbe_hw *hw = &adapter->hw;
1815 struct ix_queue *que = adapter->queues;
1816 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1819 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Auto-clear only the queue causes, not OTHER/LSC */
1821 mask = IXGBE_EIMS_ENABLE_MASK;
1822 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1823 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1825 for (int i = 0; i < adapter->num_queues; i++, que++)
1826 ixv_enable_queue(adapter, que->msix);
1828 IXGBE_WRITE_FLUSH(hw);
/*
** ixv_disable_intr: clear auto-clear (VTEIAC) and mask every cause
** (VTEIMC = all ones), then flush posted writes.
*/
1834 ixv_disable_intr(struct adapter *adapter)
1836 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1837 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1838 IXGBE_WRITE_FLUSH(&adapter->hw);
1843 ** Setup the correct IVAR register for a particular MSIX interrupt
1844 **   - entry is the register array entry
1845 **   - vector is the MSIX vector for this queue
1846 **   - type is RX/TX/MISC
/*
** ixv_set_ivar: map an interrupt cause to an MSIX vector.  type == -1
** selects the MISC IVAR (mailbox); otherwise the RX (type 0) or TX
** (type 1) byte of the per-entry VTIVAR register is rewritten.
** NOTE(review): declarations of index/ivar and the MISC byte masking
** lines are elided from this view; code left byte-identical.
*/
1849 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1851 struct ixgbe_hw *hw = &adapter->hw;
1854 vector |= IXGBE_IVAR_ALLOC_VAL;
1856 if (type == -1) { /* MISC IVAR */
1857 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1860 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1861 } else {	/* RX/TX IVARS */
/* Each 32-bit VTIVAR holds two entries; pick the right byte lane */
1862 index = (16 * (entry & 1)) + (8 * type);
1863 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1864 ivar &= ~(0xFF << index);
1865 ivar |= (vector << index);
1866 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
/*
** ixv_configure_ivars: point each queue's RX and TX causes at its
** MSIX vector, seed EITR with the default interrupt rate, and map
** the mailbox cause to adapter->vector.
*/
1871 ixv_configure_ivars(struct adapter *adapter)
1873 struct ix_queue *que = adapter->queues;
1875 for (int i = 0; i < adapter->num_queues; i++, que++) {
1876 /* First the RX queue entry */
1877 ixv_set_ivar(adapter, i, que->msix, 0);
1878 /* ... and the TX */
1879 ixv_set_ivar(adapter, i, que->msix, 1);
1880 /* Set an initial value in EITR */
1881 IXGBE_WRITE_REG(&adapter->hw,
1882 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1885 /* For the mailbox interrupt */
1886 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1891 ** Tasklet handler for MSIX MBX interrupts
1892 **  - do outside interrupt since it might sleep
/*
** ixv_handle_mbx: taskqueue context for the mailbox vector — query
** link state from the PF and propagate it to the stack.
*/
1895 ixv_handle_mbx(void *context, int pending)
1897 struct adapter *adapter = context;
1899 ixgbe_check_link(&adapter->hw,
1900 &adapter->link_speed, &adapter->link_up, 0);
1901 ixv_update_link_status(adapter);
1905 ** The VF stats registers never have a truely virgin
1906 ** starting point, so this routine tries to make an
1907 ** artificial one, marking ground zero on attach as
/*
** ixv_save_stats: fold the deltas since the recorded base values into
** the saved_reset_* accumulators so counters survive a reset; skipped
** when no packets have been counted yet.
*/
1911 ixv_save_stats(struct adapter *adapter)
1913 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1914 adapter->stats.vf.saved_reset_vfgprc +=
1915 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1916 adapter->stats.vf.saved_reset_vfgptc +=
1917 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1918 adapter->stats.vf.saved_reset_vfgorc +=
1919 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1920 adapter->stats.vf.saved_reset_vfgotc +=
1921 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1922 adapter->stats.vf.saved_reset_vfmprc +=
1923 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
/*
** ixv_init_stats: snapshot the HW counters into last_* (the 36-bit
** octet counters combine LSB and MSB registers) and copy them to
** base_* as the "ground zero" reference for delta accounting.
*/
1928 ixv_init_stats(struct adapter *adapter)
1930 struct ixgbe_hw *hw = &adapter->hw;
1932 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1933 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1934 adapter->stats.vf.last_vfgorc |=
1935 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1937 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1938 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1939 adapter->stats.vf.last_vfgotc |=
1940 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1942 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1944 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1945 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1946 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1947 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1948 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
/*
** UPDATE_STAT_32 / UPDATE_STAT_36: accumulate a 32-bit (resp. 36-bit
** split LSB/MSB) hardware counter into a 64-bit software count,
** detecting wraparound by current < last.  Expect a local `hw` in
** the expansion scope.
** NOTE(review): the brace/update lines of each macro body are elided
** from this view; code left byte-identical.
*/
1951 #define UPDATE_STAT_32(reg, last, count)		\
1953 u32 current = IXGBE_READ_REG(hw, reg);	\
1954 if (current < last)				\
1955 count += 0x100000000LL;			\
1957 count &= 0xFFFFFFFF00000000LL;		\
1961 #define UPDATE_STAT_36(lsb, msb, last, count) 		\
1963 u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
1964 u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
1965 u64 current = ((cur_msb << 32) | cur_lsb);	\
1966 if (current < last)				\
1967 count += 0x1000000000LL;			\
1969 count &= 0xFFFFFFF000000000LL;		\
1974 ** ixv_update_stats - Update the board statistics counters.
/*
** Reads each VF counter register and folds it into the software
** stats via the wraparound-aware UPDATE_STAT_* macros.
*/
1977 ixv_update_stats(struct adapter *adapter)
1979 struct ixgbe_hw *hw = &adapter->hw;
1981 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1982 adapter->stats.vf.vfgprc);
1983 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1984 adapter->stats.vf.vfgptc);
1985 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1986 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1987 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1988 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1989 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1990 adapter->stats.vf.vfmprc);
1994 * Add statistic sysctls for the VF.
/*
** ixv_add_stats_sysctls: publish driver counters, MAC (HW-register)
** stats under a "mac" node, and first-queue software stats under a
** "que" node, all read-only, on the device's sysctl tree.
** NOTE(review): only queue 0 is exported here, matching the visible
** code; some description-string lines are elided from this view.
*/
1997 ixv_add_stats_sysctls(struct adapter *adapter)
1999 device_t dev = adapter->dev;
2000 struct ix_queue *que = &adapter->queues[0];
2001 struct tx_ring *txr = que->txr;
2002 struct rx_ring *rxr = que->rxr;
2004 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2005 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2006 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2007 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2009 struct sysctl_oid *stat_node, *queue_node;
2010 struct sysctl_oid_list *stat_list, *queue_list;
2012 /* Driver Statistics */
2013 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2014 CTLFLAG_RD, &adapter->dropped_pkts,
2015 "Driver dropped packets");
2016 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
2017 CTLFLAG_RD, &adapter->mbuf_defrag_failed,
2018 "m_defrag() failed");
2019 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2020 CTLFLAG_RD, &adapter->watchdog_events,
2021 "Watchdog timeouts");
2023 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2025 "VF Statistics (read from HW registers)");
2026 stat_list = SYSCTL_CHILDREN(stat_node);
2028 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2029 CTLFLAG_RD, &stats->vfgprc,
2030 "Good Packets Received");
2031 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2032 CTLFLAG_RD, &stats->vfgorc,
2033 "Good Octets Received");
2034 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2035 CTLFLAG_RD, &stats->vfmprc,
2036 "Multicast Packets Received");
2037 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2038 CTLFLAG_RD, &stats->vfgptc,
2039 "Good Packets Transmitted");
2040 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2041 CTLFLAG_RD, &stats->vfgotc,
2042 "Good Octets Transmitted");
2044 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
2046 "Queue Statistics (collected by SW)");
2047 queue_list = SYSCTL_CHILDREN(queue_node);
2049 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2050 CTLFLAG_RD, &(que->irqs),
2052 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
2053 CTLFLAG_RD, &(rxr->rx_irq),
2054 "RX irqs on queue");
2055 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2056 CTLFLAG_RD, &(rxr->rx_packets),
2058 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2059 CTLFLAG_RD, &(rxr->rx_bytes),
2061 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2062 CTLFLAG_RD, &(rxr->rx_discarded),
2063 "Discarded RX packets");
2065 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2066 CTLFLAG_RD, &(txr->total_packets),
2069 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2070 CTLFLAG_RD, &(txr->no_desc_avail),
2071 "# of times not enough descriptors were available during TX");
2074 /**********************************************************************
2076  *  This routine is called only when em_display_debug_stats is enabled.
2077  *  This routine provides a way to take a look at important statistics
2078  *  maintained by the driver and hardware.
2080  **********************************************************************/
/*
** ixv_print_debug_info: dump error-byte count, per-queue IRQ/RX/TX/LRO
** counters and the mailbox IRQ count to the console.
** NOTE(review): the per-iteration assignments of rxr/txr/lro are
** elided from this view; code left byte-identical.
*/
2082 ixv_print_debug_info(struct adapter *adapter)
2084 device_t dev = adapter->dev;
2085 struct ixgbe_hw *hw = &adapter->hw;
2086 struct ix_queue *que = adapter->queues;
2087 struct rx_ring *rxr;
2088 struct tx_ring *txr;
2089 struct lro_ctrl *lro;
2091 device_printf(dev,"Error Byte Count = %u \n",
2092 IXGBE_READ_REG(hw, IXGBE_ERRBC));
2094 for (int i = 0; i < adapter->num_queues; i++, que++) {
2098 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2099 que->msix, (long)que->irqs);
2100 device_printf(dev,"RX(%d) Packets Received: %lld\n",
2101 rxr->me, (long long)rxr->rx_packets);
2102 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2103 rxr->me, (long)rxr->rx_bytes);
2104 device_printf(dev,"RX(%d) LRO Queued= %d\n",
2105 rxr->me, lro->lro_queued);
2106 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2107 rxr->me, lro->lro_flushed);
2108 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2109 txr->me, (long)txr->total_packets);
2110 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2111 txr->me, (long)txr->no_desc_avail);
2114 device_printf(dev,"MBX IRQ Handled: %lu\n",
2115 (long)adapter->link_irq);
2120 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2123 struct adapter *adapter;
2126 error = sysctl_handle_int(oidp, &result, 0, req);
2128 if (error || !req->newptr)
2132 adapter = (struct adapter *) arg1;
2133 ixv_print_debug_info(adapter);