/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bitset.h>
38 #include <sys/bitstring.h>
40 #include <sys/endian.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
44 #include <sys/module.h>
46 #include <sys/pciio.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/stdatomic.h>
52 #include <sys/cpuset.h>
54 #include <sys/mutex.h>
56 #include <sys/taskqueue.h>
59 #include <net/ethernet.h>
61 #include <net/if_var.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
68 #include <netinet/in.h>
69 #include <netinet/ip.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/tcp_lro.h>
73 #include <dev/pci/pcireg.h>
74 #include <dev/pci/pcivar.h>
78 #include <sys/iov_schema.h>
80 #include <machine/bus.h>
82 #include "thunder_bgx.h"
85 #include "nicvf_queues.h"
87 #define VNIC_VF_DEVSTR "Cavium Thunder NIC Virtual Function Driver"
89 #define VNIC_VF_REG_RID PCIR_BAR(PCI_CFG_REG_BAR_NUM)
91 /* Lock for core interface settings */
92 #define NICVF_CORE_LOCK_INIT(nic) \
93 sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))
95 #define NICVF_CORE_LOCK_DESTROY(nic) \
96 sx_destroy(&(nic)->core_sx)
98 #define NICVF_CORE_LOCK(nic) sx_xlock(&(nic)->core_sx)
99 #define NICVF_CORE_UNLOCK(nic) sx_xunlock(&(nic)->core_sx)
101 #define NICVF_CORE_LOCK_ASSERT(nic) sx_assert(&(nic)->core_sx, SA_XLOCKED)
104 #define SPEED_100 100
105 #define SPEED_1000 1000
106 #define SPEED_10000 10000
107 #define SPEED_40000 40000
109 MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");
111 static int nicvf_probe(device_t);
112 static int nicvf_attach(device_t);
113 static int nicvf_detach(device_t);
115 static device_method_t nicvf_methods[] = {
116 /* Device interface */
117 DEVMETHOD(device_probe, nicvf_probe),
118 DEVMETHOD(device_attach, nicvf_attach),
119 DEVMETHOD(device_detach, nicvf_detach),
124 static driver_t nicvf_driver = {
127 sizeof(struct nicvf),
130 static devclass_t nicvf_devclass;
132 DRIVER_MODULE(vnicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
133 MODULE_VERSION(vnicvf, 1);
134 MODULE_DEPEND(vnicvf, pci, 1, 1, 1);
135 MODULE_DEPEND(vnicvf, ether, 1, 1, 1);
136 MODULE_DEPEND(vnicvf, vnicpf, 1, 1, 1);
138 static int nicvf_allocate_misc_interrupt(struct nicvf *);
139 static int nicvf_enable_misc_interrupt(struct nicvf *);
140 static int nicvf_allocate_net_interrupts(struct nicvf *);
141 static void nicvf_release_all_interrupts(struct nicvf *);
142 static int nicvf_update_hw_max_frs(struct nicvf *, int);
143 static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
144 static void nicvf_config_cpi(struct nicvf *);
145 static int nicvf_rss_init(struct nicvf *);
146 static int nicvf_init_resources(struct nicvf *);
148 static int nicvf_setup_ifnet(struct nicvf *);
149 static int nicvf_setup_ifmedia(struct nicvf *);
150 static void nicvf_hw_addr_random(uint8_t *);
152 static int nicvf_if_ioctl(struct ifnet *, u_long, caddr_t);
153 static void nicvf_if_init(void *);
154 static void nicvf_if_init_locked(struct nicvf *);
155 static int nicvf_if_transmit(struct ifnet *, struct mbuf *);
156 static void nicvf_if_qflush(struct ifnet *);
157 static uint64_t nicvf_if_getcounter(struct ifnet *, ift_counter);
159 static int nicvf_stop_locked(struct nicvf *);
161 static void nicvf_media_status(struct ifnet *, struct ifmediareq *);
162 static int nicvf_media_change(struct ifnet *);
164 static void nicvf_tick_stats(void *);
167 nicvf_probe(device_t dev)
172 vendor_id = pci_get_vendor(dev);
173 device_id = pci_get_device(dev);
175 if (vendor_id != PCI_VENDOR_ID_CAVIUM)
178 if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF ||
179 device_id == PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) {
180 device_set_desc(dev, VNIC_VF_DEVSTR);
181 return (BUS_PROBE_DEFAULT);
188 nicvf_attach(device_t dev)
193 uint8_t hwaddr[ETHER_ADDR_LEN];
194 uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};
196 nic = device_get_softc(dev);
200 NICVF_CORE_LOCK_INIT(nic);
201 /* Enable HW TSO on Pass2 */
202 if (!pass1_silicon(dev))
205 rid = VNIC_VF_REG_RID;
206 nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
208 if (nic->reg_base == NULL) {
209 device_printf(dev, "Could not allocate registers memory\n");
213 qcount = MAX_CMP_QUEUES_PER_QS;
214 nic->max_queues = qcount;
216 err = nicvf_set_qset_resources(nic);
220 /* Check if PF is alive and get MAC address for this VF */
221 err = nicvf_allocate_misc_interrupt(nic);
225 NICVF_CORE_LOCK(nic);
226 err = nicvf_enable_misc_interrupt(nic);
227 NICVF_CORE_UNLOCK(nic);
229 goto err_release_intr;
231 err = nicvf_allocate_net_interrupts(nic);
234 "Could not allocate network interface interrupts\n");
238 /* If no MAC address was obtained we generate random one */
239 if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
240 nicvf_hw_addr_random(hwaddr);
241 memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
242 NICVF_CORE_LOCK(nic);
243 nicvf_hw_set_mac_addr(nic, hwaddr);
244 NICVF_CORE_UNLOCK(nic);
247 /* Configure CPI alorithm */
248 nic->cpi_alg = CPI_ALG_NONE;
249 NICVF_CORE_LOCK(nic);
250 nicvf_config_cpi(nic);
251 /* Configure receive side scaling */
252 if (nic->qs->rq_cnt > 1)
254 NICVF_CORE_UNLOCK(nic);
256 err = nicvf_setup_ifnet(nic);
258 device_printf(dev, "Could not set-up ifnet\n");
259 goto err_release_intr;
262 err = nicvf_setup_ifmedia(nic);
264 device_printf(dev, "Could not set-up ifmedia\n");
268 mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
269 callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);
271 ether_ifattach(nic->ifp, nic->hwaddr);
278 nicvf_release_all_interrupts(nic);
280 bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
287 nicvf_detach(device_t dev)
291 nic = device_get_softc(dev);
293 NICVF_CORE_LOCK(nic);
294 /* Shut down the port and release ring resources */
295 nicvf_stop_locked(nic);
296 /* Release stats lock */
297 mtx_destroy(&nic->stats_mtx);
298 /* Release interrupts */
299 nicvf_release_all_interrupts(nic);
300 /* Release memory resource */
301 if (nic->reg_base != NULL) {
302 bus_release_resource(dev, SYS_RES_MEMORY,
303 rman_get_rid(nic->reg_base), nic->reg_base);
306 /* Remove all ifmedia configurations */
307 ifmedia_removeall(&nic->if_media);
308 /* Free this ifnet */
310 NICVF_CORE_UNLOCK(nic);
311 /* Finally destroy the lock */
312 NICVF_CORE_LOCK_DESTROY(nic);
318 nicvf_hw_addr_random(uint8_t *hwaddr)
321 uint8_t addr[ETHER_ADDR_LEN];
324 * Create randomized MAC address.
325 * Set 'bsd' + random 24 low-order bits.
327 rnd = arc4random() & 0x00ffffff;
335 memcpy(hwaddr, addr, ETHER_ADDR_LEN);
339 nicvf_setup_ifnet(struct nicvf *nic)
343 ifp = if_alloc(IFT_ETHER);
345 device_printf(nic->dev, "Could not allocate ifnet structure\n");
351 if_setsoftc(ifp, nic);
352 if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
353 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
355 if_settransmitfn(ifp, nicvf_if_transmit);
356 if_setqflushfn(ifp, nicvf_if_qflush);
357 if_setioctlfn(ifp, nicvf_if_ioctl);
358 if_setinitfn(ifp, nicvf_if_init);
359 if_setgetcounterfn(ifp, nicvf_if_getcounter);
361 if_setmtu(ifp, ETHERMTU);
364 if_setcapabilities(ifp, 0);
366 /* Set the default values */
367 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
368 if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
371 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
373 if_sethwtsomax(ifp, NICVF_TSO_MAXSIZE);
374 if_sethwtsomaxsegcount(ifp, NICVF_TSO_NSEGS);
375 if_sethwtsomaxsegsize(ifp, MCLBYTES);
377 /* IP/TCP/UDP HW checksums */
378 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
379 if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
383 if_clearhwassist(ifp);
384 if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP), 0);
386 if_sethwassistbits(ifp, (CSUM_TSO), 0);
387 if_setcapenable(ifp, if_getcapabilities(ifp));
393 nicvf_setup_ifmedia(struct nicvf *nic)
396 ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
400 * Advertise availability of all possible connection types,
401 * even though not all are possible at the same time.
404 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
406 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
408 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
410 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
412 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
414 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
417 ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));
423 nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
426 struct rcv_queue *rq;
430 #if defined(INET) || defined(INET6)
432 boolean_t avoid_reset = FALSE;
435 nic = if_getsoftc(ifp);
436 ifr = (struct ifreq *)data;
437 #if defined(INET) || defined(INET6)
438 ifa = (struct ifaddr *)data;
444 if (ifa->ifa_addr->sa_family == AF_INET)
448 if (ifa->ifa_addr->sa_family == AF_INET6)
452 #if defined(INET) || defined(INET6)
453 /* Avoid reinitialization unless it's necessary */
455 if_setflagbits(ifp, IFF_UP, 0);
456 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
459 if (!(if_getflags(ifp) & IFF_NOARP))
460 arp_ifinit(ifp, ifa);
466 err = ether_ioctl(ifp, cmd, data);
469 if (ifr->ifr_mtu < NIC_HW_MIN_FRS ||
470 ifr->ifr_mtu > NIC_HW_MAX_FRS) {
473 NICVF_CORE_LOCK(nic);
474 err = nicvf_update_hw_max_frs(nic, ifr->ifr_mtu);
476 if_setmtu(ifp, ifr->ifr_mtu);
477 NICVF_CORE_UNLOCK(nic);
481 NICVF_CORE_LOCK(nic);
482 if (if_getflags(ifp) & IFF_UP) {
483 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
484 if ((nic->if_flags & if_getflags(ifp)) &
486 /* Change promiscous mode */
489 nicvf_set_promiscous(nic);
493 if ((nic->if_flags ^ if_getflags(ifp)) &
495 /* Change multicasting settings */
498 nicvf_set_multicast(nic);
502 nicvf_if_init_locked(nic);
504 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
505 nicvf_stop_locked(nic);
507 nic->if_flags = if_getflags(ifp);
508 NICVF_CORE_UNLOCK(nic);
513 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
515 NICVF_CORE_LOCK(nic);
517 nicvf_set_multicast(nic);
518 NICVF_CORE_UNLOCK(nic);
525 err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
529 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
530 if (mask & IFCAP_VLAN_MTU) {
531 /* No work to do except acknowledge the change took. */
532 if_togglecapenable(ifp, IFCAP_VLAN_MTU);
534 if (mask & IFCAP_TXCSUM)
535 if_togglecapenable(ifp, IFCAP_TXCSUM);
536 if (mask & IFCAP_RXCSUM)
537 if_togglecapenable(ifp, IFCAP_RXCSUM);
538 if ((mask & IFCAP_TSO4) && nic->hw_tso)
539 if_togglecapenable(ifp, IFCAP_TSO4);
540 if (mask & IFCAP_LRO) {
542 * Lock the driver for a moment to avoid
543 * mismatch in per-queue settings.
545 NICVF_CORE_LOCK(nic);
546 if_togglecapenable(ifp, IFCAP_LRO);
547 if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
549 * Now disable LRO for subsequent packets.
550 * Atomicity of this change is not necessary
551 * as we don't need precise toggle of this
552 * feature for all threads processing the
556 rq_idx < nic->qs->rq_cnt; rq_idx++) {
557 rq = &nic->qs->rq[rq_idx];
558 rq->lro_enabled = !rq->lro_enabled;
561 NICVF_CORE_UNLOCK(nic);
567 err = ether_ioctl(ifp, cmd, data);
575 nicvf_if_init_locked(struct nicvf *nic)
577 struct queue_set *qs = nic->qs;
583 NICVF_CORE_LOCK_ASSERT(nic);
586 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
587 nicvf_stop_locked(nic);
589 err = nicvf_enable_misc_interrupt(nic);
591 if_printf(ifp, "Could not reenable Mbox interrupt\n");
595 /* Get the latest MAC address */
596 if_addr = if_getlladdr(ifp);
597 /* Update MAC address if changed */
598 if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
599 memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
600 nicvf_hw_set_mac_addr(nic, if_addr);
603 /* Initialize the queues */
604 err = nicvf_init_resources(nic);
608 /* Make sure queue initialization is written */
611 nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
612 /* Enable Qset err interrupt */
613 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
615 /* Enable completion queue interrupt */
616 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
617 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
619 /* Enable RBDR threshold interrupt */
620 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
621 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
623 nic->drv_stats.txq_stop = 0;
624 nic->drv_stats.txq_wake = 0;
626 /* Activate network interface */
627 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
629 /* Schedule callout to update stats */
630 callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
635 /* Something went very wrong. Disable this ifnet for good */
636 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
640 nicvf_if_init(void *if_softc)
642 struct nicvf *nic = if_softc;
644 NICVF_CORE_LOCK(nic);
645 nicvf_if_init_locked(nic);
646 NICVF_CORE_UNLOCK(nic);
650 nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
652 struct nicvf *nic = if_getsoftc(ifp);
653 struct queue_set *qs = nic->qs;
654 struct snd_queue *sq;
660 if (__predict_false(qs == NULL)) {
661 panic("%s: missing queue set for %s", __func__,
662 device_get_nameunit(nic->dev));
666 if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
667 qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
669 qidx = curcpu % qs->sq_cnt;
673 if (mbuf->m_next != NULL &&
674 (mbuf->m_pkthdr.csum_flags &
675 (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
676 if (M_WRITABLE(mbuf) == 0) {
677 mtmp = m_dup(mbuf, M_NOWAIT);
685 err = drbr_enqueue(ifp, sq->br, mbuf);
686 if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
687 IFF_DRV_RUNNING) || !nic->link_up || (err != 0)) {
689 * Try to enqueue packet to the ring buffer.
690 * If the driver is not active, link down or enqueue operation
691 * failed, return with the appropriate error code.
696 if (NICVF_TX_TRYLOCK(sq) != 0) {
697 err = nicvf_xmit_locked(sq);
701 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
707 nicvf_if_qflush(struct ifnet *ifp)
710 struct queue_set *qs;
711 struct snd_queue *sq;
715 nic = if_getsoftc(ifp);
718 for (idx = 0; idx < qs->sq_cnt; idx++) {
721 while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL)
729 nicvf_if_getcounter(struct ifnet *ifp, ift_counter cnt)
732 struct nicvf_hw_stats *hw_stats;
733 struct nicvf_drv_stats *drv_stats;
735 nic = if_getsoftc(ifp);
736 hw_stats = &nic->hw_stats;
737 drv_stats = &nic->drv_stats;
740 case IFCOUNTER_IPACKETS:
741 return (drv_stats->rx_frames_ok);
742 case IFCOUNTER_OPACKETS:
743 return (drv_stats->tx_frames_ok);
744 case IFCOUNTER_IBYTES:
745 return (hw_stats->rx_bytes);
746 case IFCOUNTER_OBYTES:
747 return (hw_stats->tx_bytes_ok);
748 case IFCOUNTER_IMCASTS:
749 return (hw_stats->rx_mcast_frames);
750 case IFCOUNTER_COLLISIONS:
752 case IFCOUNTER_IQDROPS:
753 return (drv_stats->rx_drops);
754 case IFCOUNTER_OQDROPS:
755 return (drv_stats->tx_drops);
757 return (if_get_counter_default(ifp, cnt));
763 nicvf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
765 struct nicvf *nic = if_getsoftc(ifp);
767 NICVF_CORE_LOCK(nic);
769 ifmr->ifm_status = IFM_AVALID;
770 ifmr->ifm_active = IFM_ETHER;
773 /* Device attached to working network */
774 ifmr->ifm_status |= IFM_ACTIVE;
777 switch (nic->speed) {
779 ifmr->ifm_active |= IFM_10_T;
782 ifmr->ifm_active |= IFM_100_TX;
785 ifmr->ifm_active |= IFM_1000_T;
788 ifmr->ifm_active |= IFM_10G_SR;
791 ifmr->ifm_active |= IFM_40G_CR4;
794 ifmr->ifm_active |= IFM_AUTO;
799 ifmr->ifm_active |= IFM_FDX;
801 ifmr->ifm_active |= IFM_HDX;
803 NICVF_CORE_UNLOCK(nic);
807 nicvf_media_change(struct ifnet *ifp __unused)
/* Register read/write APIs */
815 nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
818 bus_write_8(nic->reg_base, offset, val);
822 nicvf_reg_read(struct nicvf *nic, uint64_t offset)
825 return (bus_read_8(nic->reg_base, offset));
829 nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
830 uint64_t qidx, uint64_t val)
833 bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
837 nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
841 return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
/* VF -> PF mailbox communication */
846 nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
848 uint64_t *msg = (uint64_t *)mbx;
850 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
851 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
855 nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
857 int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
860 NICVF_CORE_LOCK_ASSERT(nic);
862 nic->pf_acked = FALSE;
863 nic->pf_nacked = FALSE;
865 nicvf_write_to_mbx(nic, mbx);
867 /* Wait for previous message to be acked, timeout 2sec */
868 while (!nic->pf_acked) {
878 device_printf(nic->dev,
879 "PF didn't ack to mbox msg %d from VF%d\n",
880 (mbx->msg.msg & 0xFF), nic->vf_id);
889 * Checks if VF is able to comminicate with PF
890 * and also gets the VNIC number this VF is associated to.
893 nicvf_check_pf_ready(struct nicvf *nic)
895 union nic_mbx mbx = {};
897 mbx.msg.msg = NIC_MBOX_MSG_READY;
898 if (nicvf_send_msg_to_pf(nic, &mbx)) {
899 device_printf(nic->dev,
900 "PF didn't respond to READY msg\n");
908 nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
912 nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
914 nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
918 nicvf_handle_mbx_intr(struct nicvf *nic)
920 union nic_mbx mbx = {};
925 mbx_addr = NIC_VF_PF_MAILBOX_0_1;
926 mbx_data = (uint64_t *)&mbx;
928 for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
929 *mbx_data = nicvf_reg_read(nic, mbx_addr);
931 mbx_addr += sizeof(uint64_t);
934 switch (mbx.msg.msg) {
935 case NIC_MBOX_MSG_READY:
936 nic->pf_acked = TRUE;
937 nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
938 nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
939 nic->node = mbx.nic_cfg.node_id;
940 memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
941 nic->loopback_supported = mbx.nic_cfg.loopback_supported;
942 nic->link_up = FALSE;
946 case NIC_MBOX_MSG_ACK:
947 nic->pf_acked = TRUE;
949 case NIC_MBOX_MSG_NACK:
950 nic->pf_nacked = TRUE;
952 case NIC_MBOX_MSG_RSS_SIZE:
953 nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
954 nic->pf_acked = TRUE;
956 case NIC_MBOX_MSG_BGX_STATS:
957 nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
958 nic->pf_acked = TRUE;
960 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
961 nic->pf_acked = TRUE;
962 nic->link_up = mbx.link_status.link_up;
963 nic->duplex = mbx.link_status.duplex;
964 nic->speed = mbx.link_status.speed;
966 if_setbaudrate(nic->ifp, nic->speed * 1000000);
967 if_link_state_change(nic->ifp, LINK_STATE_UP);
969 if_setbaudrate(nic->ifp, 0);
970 if_link_state_change(nic->ifp, LINK_STATE_DOWN);
974 device_printf(nic->dev,
975 "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
978 nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
982 nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
984 union nic_mbx mbx = {};
986 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
987 mbx.frs.max_frs = mtu;
988 mbx.frs.vf_id = nic->vf_id;
990 return nicvf_send_msg_to_pf(nic, &mbx);
994 nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
996 union nic_mbx mbx = {};
998 mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
999 mbx.mac.vf_id = nic->vf_id;
1000 memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);
1002 return (nicvf_send_msg_to_pf(nic, &mbx));
1006 nicvf_config_cpi(struct nicvf *nic)
1008 union nic_mbx mbx = {};
1010 mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
1011 mbx.cpi_cfg.vf_id = nic->vf_id;
1012 mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
1013 mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
1015 nicvf_send_msg_to_pf(nic, &mbx);
1019 nicvf_get_rss_size(struct nicvf *nic)
1021 union nic_mbx mbx = {};
1023 mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
1024 mbx.rss_size.vf_id = nic->vf_id;
1025 nicvf_send_msg_to_pf(nic, &mbx);
1029 nicvf_config_rss(struct nicvf *nic)
1031 union nic_mbx mbx = {};
1032 struct nicvf_rss_info *rss;
1036 rss = &nic->rss_info;
1037 ind_tbl_len = rss->rss_size;
1040 mbx.rss_cfg.vf_id = nic->vf_id;
1041 mbx.rss_cfg.hash_bits = rss->hash_bits;
1042 while (ind_tbl_len != 0) {
1043 mbx.rss_cfg.tbl_offset = nextq;
1044 mbx.rss_cfg.tbl_len = MIN(ind_tbl_len,
1045 RSS_IND_TBL_LEN_PER_MBX_MSG);
1046 mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
1047 NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
1049 for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
1050 mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
1052 nicvf_send_msg_to_pf(nic, &mbx);
1054 ind_tbl_len -= mbx.rss_cfg.tbl_len;
1059 nicvf_set_rss_key(struct nicvf *nic)
1061 struct nicvf_rss_info *rss;
1065 rss = &nic->rss_info;
1066 key_addr = NIC_VNIC_RSS_KEY_0_4;
1068 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
1069 nicvf_reg_write(nic, key_addr, rss->key[idx]);
1070 key_addr += sizeof(uint64_t);
1075 nicvf_rss_init(struct nicvf *nic)
1077 struct nicvf_rss_info *rss;
1080 nicvf_get_rss_size(nic);
1082 rss = &nic->rss_info;
1083 if (nic->cpi_alg != CPI_ALG_NONE) {
1084 rss->enable = FALSE;
1091 /* Using the HW reset value for now */
1092 rss->key[0] = 0xFEED0BADFEED0BADUL;
1093 rss->key[1] = 0xFEED0BADFEED0BADUL;
1094 rss->key[2] = 0xFEED0BADFEED0BADUL;
1095 rss->key[3] = 0xFEED0BADFEED0BADUL;
1096 rss->key[4] = 0xFEED0BADFEED0BADUL;
1098 nicvf_set_rss_key(nic);
1100 rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
1101 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
1103 rss->hash_bits = fls(rss->rss_size) - 1;
1104 for (idx = 0; idx < rss->rss_size; idx++)
1105 rss->ind_tbl[idx] = idx % nic->rx_queues;
1107 nicvf_config_rss(nic);
1113 nicvf_init_resources(struct nicvf *nic)
1116 union nic_mbx mbx = {};
1118 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
1121 nicvf_qset_config(nic, TRUE);
1123 /* Initialize queues and HW for data transfer */
1124 err = nicvf_config_data_transfer(nic, TRUE);
1126 device_printf(nic->dev,
1127 "Failed to alloc/config VF's QSet resources\n");
1131 /* Send VF config done msg to PF */
1132 nicvf_write_to_mbx(nic, &mbx);
1138 nicvf_misc_intr_handler(void *arg)
1140 struct nicvf *nic = (struct nicvf *)arg;
1143 intr = nicvf_reg_read(nic, NIC_VF_INT);
1144 /* Check for spurious interrupt */
1145 if (!(intr & NICVF_INTR_MBOX_MASK))
1148 nicvf_handle_mbx_intr(nic);
1152 nicvf_intr_handler(void *arg)
1155 struct cmp_queue *cq;
1158 cq = (struct cmp_queue *)arg;
1162 /* Disable interrupts */
1163 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
1165 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
1167 /* Clear interrupt */
1168 nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
1170 return (FILTER_HANDLED);
1174 nicvf_rbdr_intr_handler(void *arg)
1177 struct queue_set *qs;
1181 nic = (struct nicvf *)arg;
1183 /* Disable RBDR interrupt and schedule softirq */
1184 for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
1185 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
1187 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1190 rbdr = &qs->rbdr[qidx];
1191 taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
1192 /* Clear interrupt */
1193 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1198 nicvf_qs_err_intr_handler(void *arg)
1200 struct nicvf *nic = (struct nicvf *)arg;
1201 struct queue_set *qs = nic->qs;
1203 /* Disable Qset err interrupt and schedule softirq */
1204 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1205 taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
1206 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1211 nicvf_enable_msix(struct nicvf *nic)
1213 struct pci_devinfo *dinfo;
1217 dinfo = device_get_ivars(nic->dev);
1218 rid = dinfo->cfg.msix.msix_table_bar;
1219 nic->msix_table_res =
1220 bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1221 if (nic->msix_table_res == NULL) {
1222 device_printf(nic->dev,
1223 "Could not allocate memory for MSI-X table\n");
1227 count = nic->num_vec = NIC_VF_MSIX_VECTORS;
1229 ret = pci_alloc_msix(nic->dev, &count);
1230 if ((ret != 0) || (count != nic->num_vec)) {
1231 device_printf(nic->dev,
1232 "Request for #%d msix vectors failed, error: %d\n",
1237 nic->msix_enabled = 1;
1242 nicvf_disable_msix(struct nicvf *nic)
1245 if (nic->msix_enabled) {
1246 pci_release_msi(nic->dev);
1247 nic->msix_enabled = 0;
1253 nicvf_release_all_interrupts(struct nicvf *nic)
1255 struct resource *res;
1259 /* Free registered interrupts */
1260 for (irq = 0; irq < nic->num_vec; irq++) {
1261 res = nic->msix_entries[irq].irq_res;
1264 /* Teardown interrupt first */
1265 if (nic->msix_entries[irq].handle != NULL) {
1266 err = bus_teardown_intr(nic->dev,
1267 nic->msix_entries[irq].irq_res,
1268 nic->msix_entries[irq].handle);
1270 ("ERROR: Unable to teardown interrupt %d", irq));
1271 nic->msix_entries[irq].handle = NULL;
1274 bus_release_resource(nic->dev, SYS_RES_IRQ,
1275 rman_get_rid(res), nic->msix_entries[irq].irq_res);
1276 nic->msix_entries[irq].irq_res = NULL;
1279 nicvf_disable_msix(nic);
1283 * Initialize MSIX vectors and register MISC interrupt.
1284 * Send READY message to PF to check if its alive
1287 nicvf_allocate_misc_interrupt(struct nicvf *nic)
1289 struct resource *res;
1293 /* Return if mailbox interrupt is already registered */
1294 if (nic->msix_enabled)
1298 if (nicvf_enable_msix(nic) != 0)
1301 irq = NICVF_INTR_ID_MISC;
1303 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1304 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1305 if (nic->msix_entries[irq].irq_res == NULL) {
1306 device_printf(nic->dev,
1307 "Could not allocate Mbox interrupt for VF%d\n",
1308 device_get_unit(nic->dev));
1312 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1313 (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
1314 &nic->msix_entries[irq].handle);
1316 res = nic->msix_entries[irq].irq_res;
1317 bus_release_resource(nic->dev, SYS_RES_IRQ,
1318 rman_get_rid(res), res);
1319 nic->msix_entries[irq].irq_res = NULL;
1327 nicvf_enable_misc_interrupt(struct nicvf *nic)
1330 /* Enable mailbox interrupt */
1331 nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
1333 /* Check if VF is able to communicate with PF */
1334 if (!nicvf_check_pf_ready(nic)) {
1335 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1343 nicvf_release_net_interrupts(struct nicvf *nic)
1345 struct resource *res;
1349 for_each_cq_irq(irq) {
1350 res = nic->msix_entries[irq].irq_res;
1353 /* Teardown active interrupts first */
1354 if (nic->msix_entries[irq].handle != NULL) {
1355 err = bus_teardown_intr(nic->dev,
1356 nic->msix_entries[irq].irq_res,
1357 nic->msix_entries[irq].handle);
1359 ("ERROR: Unable to teardown CQ interrupt %d",
1360 (irq - NICVF_INTR_ID_CQ)));
1365 /* Release resource */
1366 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1368 nic->msix_entries[irq].irq_res = NULL;
1371 for_each_rbdr_irq(irq) {
1372 res = nic->msix_entries[irq].irq_res;
1375 /* Teardown active interrupts first */
1376 if (nic->msix_entries[irq].handle != NULL) {
1377 err = bus_teardown_intr(nic->dev,
1378 nic->msix_entries[irq].irq_res,
1379 nic->msix_entries[irq].handle);
1381 ("ERROR: Unable to teardown RDBR interrupt %d",
1382 (irq - NICVF_INTR_ID_RBDR)));
1387 /* Release resource */
1388 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1390 nic->msix_entries[irq].irq_res = NULL;
1393 irq = NICVF_INTR_ID_QS_ERR;
1394 res = nic->msix_entries[irq].irq_res;
1396 /* Teardown active interrupts first */
1397 if (nic->msix_entries[irq].handle != NULL) {
1398 err = bus_teardown_intr(nic->dev,
1399 nic->msix_entries[irq].irq_res,
1400 nic->msix_entries[irq].handle);
1402 ("ERROR: Unable to teardown QS Error interrupt %d",
1408 /* Release resource */
1409 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1411 nic->msix_entries[irq].irq_res = NULL;
1416 nicvf_allocate_net_interrupts(struct nicvf *nic)
1423 /* MSI-X must be configured by now */
1424 if (!nic->msix_enabled) {
1425 device_printf(nic->dev, "Cannot alloacte queue interrups. "
1426 "MSI-X interrupts disabled.\n");
1430 /* Register CQ interrupts */
1431 for_each_cq_irq(irq) {
1432 if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
1435 qidx = irq - NICVF_INTR_ID_CQ;
1437 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1438 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1439 if (nic->msix_entries[irq].irq_res == NULL) {
1440 device_printf(nic->dev,
1441 "Could not allocate CQ interrupt %d for VF%d\n",
1442 (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
1446 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1447 (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
1448 NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
1450 device_printf(nic->dev,
1451 "Could not setup CQ interrupt %d for VF%d\n",
1452 (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
1455 cpuid = (device_get_unit(nic->dev) * CMP_QUEUE_CNT) + qidx;
1458 * Save CPU ID for later use when system-wide RSS is enabled.
1459 * It will be used to pit the CQ task to the same CPU that got
1462 nic->qs->cq[qidx].cmp_cpuid = cpuid;
1464 device_printf(nic->dev, "bind CQ%d IRQ to CPU%d\n",
1467 /* Bind interrupts to the given CPU */
1468 bus_bind_intr(nic->dev, nic->msix_entries[irq].irq_res, cpuid);
1471 /* Register RBDR interrupt */
1472 for_each_rbdr_irq(irq) {
1473 if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
1477 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1478 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1479 if (nic->msix_entries[irq].irq_res == NULL) {
1480 device_printf(nic->dev,
1481 "Could not allocate RBDR interrupt %d for VF%d\n",
1482 (irq - NICVF_INTR_ID_RBDR),
1483 device_get_unit(nic->dev));
1487 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1488 (INTR_MPSAFE | INTR_TYPE_NET), NULL,
1489 nicvf_rbdr_intr_handler, nic,
1490 &nic->msix_entries[irq].handle);
1492 device_printf(nic->dev,
1493 "Could not setup RBDR interrupt %d for VF%d\n",
1494 (irq - NICVF_INTR_ID_RBDR),
1495 device_get_unit(nic->dev));
1500 /* Register QS error interrupt */
1501 irq = NICVF_INTR_ID_QS_ERR;
1503 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1504 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1505 if (nic->msix_entries[irq].irq_res == NULL) {
1506 device_printf(nic->dev,
1507 "Could not allocate QS Error interrupt for VF%d\n",
1508 device_get_unit(nic->dev));
1512 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1513 (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
1514 nic, &nic->msix_entries[irq].handle);
1516 device_printf(nic->dev,
1517 "Could not setup QS Error interrupt for VF%d\n",
1518 device_get_unit(nic->dev));
1524 nicvf_release_net_interrupts(nic);
1529 nicvf_stop_locked(struct nicvf *nic)
1533 struct queue_set *qs = nic->qs;
1534 union nic_mbx mbx = {};
1536 NICVF_CORE_LOCK_ASSERT(nic);
1537 /* Stop callout. Can block here since holding SX lock */
1538 callout_drain(&nic->stats_callout);
1542 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1543 nicvf_send_msg_to_pf(nic, &mbx);
1545 /* Disable RBDR & QS error interrupts */
1546 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1547 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1548 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1550 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1551 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1553 /* Deactivate network interface */
1554 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
1556 /* Free resources */
1557 nicvf_config_data_transfer(nic, FALSE);
1559 /* Disable HW Qset */
1560 nicvf_qset_config(nic, FALSE);
1562 /* disable mailbox interrupt */
1563 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1569 nicvf_update_stats(struct nicvf *nic)
1572 struct nicvf_hw_stats *stats = &nic->hw_stats;
1573 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1574 struct queue_set *qs = nic->qs;
1576 #define GET_RX_STATS(reg) \
1577 nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
1578 #define GET_TX_STATS(reg) \
1579 nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))
1581 stats->rx_bytes = GET_RX_STATS(RX_OCTS);
1582 stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
1583 stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
1584 stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
1585 stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
1586 stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
1587 stats->rx_drop_red = GET_RX_STATS(RX_RED);
1588 stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
1589 stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
1590 stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
1591 stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
1592 stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
1593 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1594 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1596 stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
1597 stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
1598 stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
1599 stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
1600 stats->tx_drops = GET_TX_STATS(TX_DROP);
1602 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1603 stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
1604 drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
1605 drv_stats->tx_drops = stats->tx_drops;
1607 /* Update RQ and SQ stats */
1608 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1609 nicvf_update_rq_stats(nic, qidx);
1610 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1611 nicvf_update_sq_stats(nic, qidx);
1615 nicvf_tick_stats(void *arg)
1619 nic = (struct nicvf *)arg;
1621 /* Read the statistics */
1622 nicvf_update_stats(nic);
1624 callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);