2 * Copyright (C) 2015 Cavium Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 #include "opt_inet6.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bitset.h>
38 #include <sys/bitstring.h>
40 #include <sys/endian.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
44 #include <sys/module.h>
46 #include <sys/pciio.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/stdatomic.h>
52 #include <sys/cpuset.h>
54 #include <sys/mutex.h>
56 #include <sys/taskqueue.h>
59 #include <net/ethernet.h>
61 #include <net/if_var.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
76 #include <sys/iov_schema.h>
78 #include <machine/bus.h>
80 #include "thunder_bgx.h"
83 #include "nicvf_queues.h"
/* Human-readable device description reported on probe. */
85 #define VNIC_VF_DEVSTR "Cavium Thunder NIC Virtual Function Driver"
/* Resource ID of the register BAR used for the memory-mapped CSR window. */
87 #define VNIC_VF_REG_RID PCIR_BAR(PCI_CFG_REG_BAR_NUM)
89 /* Lock for core interface settings */
/* Wrappers around an sx(9) shared/exclusive lock embedded in struct nicvf. */
90 #define NICVF_CORE_LOCK_INIT(nic) \
91 sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))
93 #define NICVF_CORE_LOCK_DESTROY(nic) \
94 sx_destroy(&(nic)->core_sx)
96 #define NICVF_CORE_LOCK(nic) sx_xlock(&(nic)->core_sx)
97 #define NICVF_CORE_UNLOCK(nic) sx_xunlock(&(nic)->core_sx)
99 #define NICVF_CORE_LOCK_ASSERT(nic) sx_assert(&(nic)->core_sx, SA_XLOCKED)
/* Link speeds in Mbps, matched against nic->speed in nicvf_media_status(). */
102 #define SPEED_100 100
103 #define SPEED_1000 1000
104 #define SPEED_10000 10000
105 #define SPEED_40000 40000
/* malloc(9) type tag for this driver's dynamic allocations. */
107 MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");
/* newbus device interface entry points. */
109 static int nicvf_probe(device_t);
110 static int nicvf_attach(device_t);
111 static int nicvf_detach(device_t);
113 static device_method_t nicvf_methods[] = {
114 /* Device interface */
115 DEVMETHOD(device_probe, nicvf_probe),
116 DEVMETHOD(device_attach, nicvf_attach),
117 DEVMETHOD(device_detach, nicvf_detach),
122 static driver_t nicvf_driver = {
125 sizeof(struct nicvf),
128 static devclass_t nicvf_devclass;
/* Register on the pci bus; the VF depends on the PF driver (vnic_pf). */
130 DRIVER_MODULE(nicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
131 MODULE_DEPEND(nicvf, pci, 1, 1, 1);
132 MODULE_DEPEND(nicvf, ether, 1, 1, 1);
133 MODULE_DEPEND(nicvf, vnic_pf, 1, 1, 1);
/* Interrupt allocation/teardown helpers. */
135 static int nicvf_allocate_misc_interrupt(struct nicvf *);
136 static int nicvf_enable_misc_interrupt(struct nicvf *);
137 static int nicvf_allocate_net_interrupts(struct nicvf *);
138 static void nicvf_release_all_interrupts(struct nicvf *);
/* Hardware configuration helpers (MAC, CPI, queue resources). */
139 static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
140 static void nicvf_config_cpi(struct nicvf *);
141 static int nicvf_init_resources(struct nicvf *);
143 static int nicvf_setup_ifnet(struct nicvf *);
144 static int nicvf_setup_ifmedia(struct nicvf *);
145 static void nicvf_hw_addr_random(uint8_t *);
/* ifnet(9) method implementations. */
147 static int nicvf_if_ioctl(struct ifnet *, u_long, caddr_t);
148 static void nicvf_if_init(void *);
149 static void nicvf_if_init_locked(struct nicvf *);
150 static int nicvf_if_transmit(struct ifnet *, struct mbuf *);
151 static void nicvf_if_qflush(struct ifnet *);
152 static uint64_t nicvf_if_getcounter(struct ifnet *, ift_counter);
154 static int nicvf_stop_locked(struct nicvf *);
156 static void nicvf_media_status(struct ifnet *, struct ifmediareq *);
157 static int nicvf_media_change(struct ifnet *);
/* Periodic statistics callout handler. */
159 static void nicvf_tick_stats(void *);
/*
 * Probe: match the Cavium vendor ID and either ThunderX NIC VF device ID
 * (production or pass-1 silicon); set the description on a match.
 */
162 nicvf_probe(device_t dev)
167 vendor_id = pci_get_vendor(dev);
168 device_id = pci_get_device(dev);
/* Not a Cavium device at all - decline. */
170 if (vendor_id != PCI_VENDOR_ID_CAVIUM)
173 if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF ||
174 device_id == PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) {
175 device_set_desc(dev, VNIC_VF_DEVSTR)
176 return (BUS_PROBE_DEFAULT);
/*
 * Attach: map the register BAR, size the queue set, bring up the mailbox
 * (misc) interrupt to talk to the PF, obtain or randomize the MAC address,
 * configure CPI, create the ifnet/ifmedia objects and attach to ether.
 * On failure the error path releases interrupts and the register resource.
 */
183 nicvf_attach(device_t dev)
188 uint8_t hwaddr[ETHER_ADDR_LEN];
/* All-zero MAC used to detect "PF gave us no address". */
189 uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};
191 nic = device_get_softc(dev);
195 NICVF_CORE_LOCK_INIT(nic);
197 rid = VNIC_VF_REG_RID;
198 nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
200 if (nic->reg_base == NULL) {
201 device_printf(dev, "Could not allocate registers memory\n");
205 qcount = MAX_CMP_QUEUES_PER_QS;
206 nic->max_queues = qcount;
208 err = nicvf_set_qset_resources(nic);
212 /* Check if PF is alive and get MAC address for this VF */
213 err = nicvf_allocate_misc_interrupt(nic);
217 NICVF_CORE_LOCK(nic);
218 err = nicvf_enable_misc_interrupt(nic);
219 NICVF_CORE_UNLOCK(nic);
221 goto err_release_intr;
223 err = nicvf_allocate_net_interrupts(nic);
226 "Could not allocate network interface interrupts\n");
230 /* If no MAC address was obtained we generate random one */
231 if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
232 nicvf_hw_addr_random(hwaddr);
233 memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
/* Push the generated address to the hardware via the PF mailbox. */
234 NICVF_CORE_LOCK(nic);
235 nicvf_hw_set_mac_addr(nic, hwaddr);
236 NICVF_CORE_UNLOCK(nic);
239 /* Configure CPI algorithm */
240 nic->cpi_alg = CPI_ALG_NONE;
241 NICVF_CORE_LOCK(nic);
242 nicvf_config_cpi(nic);
243 NICVF_CORE_UNLOCK(nic);
245 err = nicvf_setup_ifnet(nic);
247 device_printf(dev, "Could not set-up ifnet\n");
248 goto err_release_intr;
251 err = nicvf_setup_ifmedia(nic);
253 device_printf(dev, "Could not set-up ifmedia\n");
/* Stats are protected by their own mutex; callout runs with it held. */
257 mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
258 callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);
260 ether_ifattach(nic->ifp, nic->hwaddr);
/* Error unwind path. */
267 nicvf_release_all_interrupts(nic);
269 bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
/*
 * Detach: stop the port, tear down stats lock, interrupts, the register
 * BAR, ifmedia and the ifnet, then destroy the core lock last.
 */
276 nicvf_detach(device_t dev)
280 nic = device_get_softc(dev);
282 NICVF_CORE_LOCK(nic);
283 /* Shut down the port and release ring resources */
284 nicvf_stop_locked(nic);
285 /* Release stats lock */
286 mtx_destroy(&nic->stats_mtx);
287 /* Release interrupts */
288 nicvf_release_all_interrupts(nic);
289 /* Release memory resource */
290 if (nic->reg_base != NULL) {
291 bus_release_resource(dev, SYS_RES_MEMORY,
292 rman_get_rid(nic->reg_base), nic->reg_base);
295 /* Remove all ifmedia configurations */
296 ifmedia_removeall(&nic->if_media);
297 /* Free this ifnet */
299 NICVF_CORE_UNLOCK(nic);
300 /* Finally destroy the lock */
301 NICVF_CORE_LOCK_DESTROY(nic);
/*
 * Generate a randomized MAC address into hwaddr: a fixed OUI-style
 * prefix plus 24 random low-order bits from arc4random().
 */
307 nicvf_hw_addr_random(uint8_t *hwaddr)
310 uint8_t addr[ETHER_ADDR_LEN];
313 * Create randomized MAC address.
314 * Set 'bsd' + random 24 low-order bits.
316 rnd = arc4random() & 0x00ffffff;
324 memcpy(hwaddr, addr, ETHER_ADDR_LEN);
/*
 * Allocate and configure the ifnet: install the driver's transmit,
 * qflush, ioctl, init and getcounter methods, set queue length, MTU
 * and capabilities (VLAN_MTU; polling intentionally unsupported).
 */
328 nicvf_setup_ifnet(struct nicvf *nic)
332 ifp = if_alloc(IFT_ETHER);
334 device_printf(nic->dev, "Could not allocate ifnet structure\n");
340 if_setsoftc(ifp, nic);
341 if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
342 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX);
344 if_settransmitfn(ifp, nicvf_if_transmit);
345 if_setqflushfn(ifp, nicvf_if_qflush);
346 if_setioctlfn(ifp, nicvf_if_ioctl);
347 if_setinitfn(ifp, nicvf_if_init);
348 if_setgetcounterfn(ifp, nicvf_if_getcounter);
350 /* Set send queue length to the default maximum */
351 if_setsendqlen(ifp, IFQ_MAXLEN);
352 if_setsendqready(ifp);
353 if_setmtu(ifp, ETHERMTU);
355 if_setcapabilities(ifp, IFCAP_VLAN_MTU);
356 #ifdef DEVICE_POLLING
/* Deliberate guard: polling support has not been implemented. */
357 #error "DEVICE_POLLING not supported in VNIC driver yet"
358 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
360 if_setcapenable(ifp, if_getcapabilities(ifp));
361 if_setmtu(ifp, ETHERMTU);
/*
 * Register all media types this hardware family may negotiate
 * (10M..40G full duplex plus autoselect) and default to autoselect.
 */
367 nicvf_setup_ifmedia(struct nicvf *nic)
370 ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
374 * Advertise availability of all possible connection types,
375 * even though not all are possible at the same time.
378 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
380 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
382 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
384 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
386 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
388 ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
391 ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));
/*
 * ifnet ioctl handler: address assignment (INET/INET6 avoid a full
 * reinit where possible), interface flags (promiscuous/allmulti and
 * up/down transitions), multicast list updates, media selection and
 * capability toggles. Anything unhandled falls through to ether_ioctl().
 */
397 nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
403 #if defined(INET) || defined(INET6)
405 boolean_t avoid_reset = FALSE;
408 nic = if_getsoftc(ifp);
409 ifr = (struct ifreq *)data;
410 #if defined(INET) || defined(INET6)
411 ifa = (struct ifaddr *)data;
417 if (ifa->ifa_addr->sa_family == AF_INET)
421 if (ifa->ifa_addr->sa_family == AF_INET6)
425 #if defined(INET) || defined(INET6)
426 /* Avoid reinitialization unless it's necessary */
428 ifp->if_flags |= IFF_UP;
429 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
432 if (!(if_getflags(ifp) & IFF_NOARP))
433 arp_ifinit(ifp, ifa);
439 err = ether_ioctl(ifp, cmd, data);
443 * ARM64TODO: Needs to be implemented.
444 * Currently ETHERMTU is set by default.
446 err = ether_ioctl(ifp, cmd, data);
/* SIOCSIFFLAGS: reconcile saved flags against the new ones. */
449 NICVF_CORE_LOCK(nic);
450 if (if_getflags(ifp) & IFF_UP) {
451 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
452 flags = ifp->if_flags ^ nic->if_flags;
453 if ((nic->if_flags & ifp->if_flags) &
455 /* Change promiscuous mode */
458 nicvf_set_promiscous(nic);
462 if ((nic->if_flags ^ ifp->if_flags) &
464 /* Change multicasting settings */
467 nicvf_set_multicast(nic);
471 nicvf_if_init_locked(nic);
473 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
474 nicvf_stop_locked(nic);
/* Remember flags so the next SIOCSIFFLAGS can diff against them. */
476 nic->if_flags = ifp->if_flags;
477 NICVF_CORE_UNLOCK(nic);
482 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
484 NICVF_CORE_LOCK(nic);
486 nicvf_set_multicast(nic);
487 NICVF_CORE_UNLOCK(nic);
494 err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
/* SIOCSIFCAP: only IFCAP_VLAN_MTU is toggleable, and it is a no-op. */
498 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
499 if (mask & IFCAP_VLAN_MTU) {
500 /* No work to do except acknowledge the change took. */
501 ifp->if_capenable ^= IFCAP_VLAN_MTU;
506 err = ether_ioctl(ifp, cmd, data);
/*
 * Bring the interface up with the core lock held: (re)enable the mailbox
 * interrupt, sync the MAC address, initialize queue resources, enable the
 * per-queue interrupts, reset driver tx stats, mark the ifnet RUNNING and
 * start the statistics callout. On fatal error the ifnet is left OACTIVE.
 */
514 nicvf_if_init_locked(struct nicvf *nic)
516 struct queue_set *qs = nic->qs;
522 NICVF_CORE_LOCK_ASSERT(nic);
/* Restart from a clean state if already running. */
525 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
526 nicvf_stop_locked(nic);
528 err = nicvf_enable_misc_interrupt(nic);
530 if_printf(ifp, "Could not reenable Mbox interrupt\n");
534 /* Get the latest MAC address */
535 if_addr = if_getlladdr(ifp);
536 /* Update MAC address if changed */
537 if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
538 memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
539 nicvf_hw_set_mac_addr(nic, if_addr);
542 /* Initialize the queues */
543 err = nicvf_init_resources(nic);
547 /* Make sure queue initialization is written */
/* Clear any stale interrupt status before enabling sources. */
550 nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
551 /* Enable Qset err interrupt */
552 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
554 /* Enable completion queue interrupt */
555 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
556 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
558 /* Enable RBDR threshold interrupt */
559 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
560 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
562 nic->drv_stats.txq_stop = 0;
563 nic->drv_stats.txq_wake = 0;
565 /* Activate network interface */
566 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
568 /* Schedule callout to update stats */
569 callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
574 /* Something went very wrong. Disable this ifnet for good */
575 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
/* ifnet init method: take the core lock and delegate to the locked path. */
579 nicvf_if_init(void *if_softc)
581 struct nicvf *nic = if_softc;
583 NICVF_CORE_LOCK(nic);
584 nicvf_if_init_locked(nic);
585 NICVF_CORE_UNLOCK(nic);
/*
 * ifnet transmit method: pick a send queue from the mbuf flowid (or the
 * current CPU when no hash is present), enqueue on the queue's buf_ring
 * and kick the queue's taskqueue to drain it.
 */
589 nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
591 struct nicvf *nic = if_getsoftc(ifp);
592 struct queue_set *qs = nic->qs;
593 struct snd_queue *sq;
/* A missing queue set at transmit time is a driver bug, not a runtime error. */
598 if (__predict_false(qs == NULL)) {
599 panic("%s: missing queue set for %s", __func__,
600 device_get_nameunit(nic->dev));
604 if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
605 qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
607 qidx = curcpu % qs->sq_cnt;
611 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
614 err = drbr_enqueue(ifp, sq->br, mbuf);
619 err = drbr_enqueue(ifp, sq->br, mbuf);
624 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
/* ifnet qflush method: drain and free every mbuf queued on each send ring. */
630 nicvf_if_qflush(struct ifnet *ifp)
633 struct queue_set *qs;
634 struct snd_queue *sq;
638 nic = if_getsoftc(ifp);
641 for (idx = 0; idx < qs->sq_cnt; idx++) {
644 while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL)
/*
 * ifnet getcounter method: map generic interface counters onto the
 * driver/hardware statistics gathered by nicvf_update_stats(); anything
 * not tracked falls back to the stack's default counter.
 */
652 nicvf_if_getcounter(struct ifnet *ifp, ift_counter cnt)
655 struct nicvf_hw_stats *hw_stats;
656 struct nicvf_drv_stats *drv_stats;
658 nic = if_getsoftc(ifp);
659 hw_stats = &nic->hw_stats;
660 drv_stats = &nic->drv_stats;
663 case IFCOUNTER_IPACKETS:
664 return (drv_stats->rx_frames_ok);
665 case IFCOUNTER_OPACKETS:
666 return (drv_stats->tx_frames_ok);
667 case IFCOUNTER_IBYTES:
668 return (hw_stats->rx_bytes);
669 case IFCOUNTER_OBYTES:
670 return (hw_stats->tx_bytes_ok);
671 case IFCOUNTER_IMCASTS:
672 return (hw_stats->rx_mcast_frames);
673 case IFCOUNTER_COLLISIONS:
675 case IFCOUNTER_IQDROPS:
676 return (drv_stats->rx_drops);
677 case IFCOUNTER_OQDROPS:
678 return (drv_stats->tx_drops);
680 return (if_get_counter_default(ifp, cnt));
/*
 * ifmedia status callback: report link state, translate nic->speed
 * (as last reported by the PF over the mailbox) to an IFM_* media
 * type, and report duplex.
 */
686 nicvf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
688 struct nicvf *nic = if_getsoftc(ifp);
690 NICVF_CORE_LOCK(nic);
692 ifmr->ifm_status = IFM_AVALID;
693 ifmr->ifm_active = IFM_ETHER;
696 /* Device attached to working network */
697 ifmr->ifm_status |= IFM_ACTIVE;
700 switch (nic->speed) {
702 ifmr->ifm_active |= IFM_10_T;
705 ifmr->ifm_active |= IFM_100_TX;
708 ifmr->ifm_active |= IFM_1000_T;
711 ifmr->ifm_active |= IFM_10G_SR;
714 ifmr->ifm_active |= IFM_40G_CR4;
717 ifmr->ifm_active |= IFM_AUTO;
722 ifmr->ifm_active |= IFM_FDX;
724 ifmr->ifm_active |= IFM_HDX;
726 NICVF_CORE_UNLOCK(nic);
/* ifmedia change callback: media is fixed by the PF, nothing to do. */
730 nicvf_media_change(struct ifnet *ifp __unused)
736 /* Register read/write APIs */
/* 64-bit write to a CSR at the given offset within the register BAR. */
738 nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
741 bus_write_8(nic->reg_base, offset, val);
/* 64-bit read of a CSR at the given offset. */
745 nicvf_reg_read(struct nicvf *nic, uint64_t offset)
748 return (bus_read_8(nic->reg_base, offset));
/* Per-queue variants: the queue index is folded into the CSR address. */
752 nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
753 uint64_t qidx, uint64_t val)
756 bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
760 nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
764 return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
767 /* VF -> PF mailbox communication */
/* Copy the two 64-bit words of a mailbox message into the mailbox CSRs. */
769 nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
771 uint64_t *msg = (uint64_t *)mbx;
773 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
774 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
/*
 * Send a message to the PF and busy-wait (with the core lock held) for
 * the ACK/NACK flags set by the mailbox interrupt handler; logs and
 * reports a timeout if the PF never answers.
 */
778 nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
780 int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
783 NICVF_CORE_LOCK_ASSERT(nic);
785 nic->pf_acked = FALSE;
786 nic->pf_nacked = FALSE;
788 nicvf_write_to_mbx(nic, mbx);
790 /* Wait for previous message to be acked, timeout 2sec */
791 while (!nic->pf_acked) {
801 device_printf(nic->dev,
802 "PF didn't ack to mbox msg %d from VF%d\n",
803 (mbx->msg.msg & 0xFF), nic->vf_id);
812 * Checks if VF is able to communicate with PF
813 * and also gets the VNIC number this VF is associated to.
816 nicvf_check_pf_ready(struct nicvf *nic)
818 union nic_mbx mbx = {};
/* READY handshake; the reply also carries vf_id/MAC (see mbox handler). */
820 mbx.msg.msg = NIC_MBOX_MSG_READY;
821 if (nicvf_send_msg_to_pf(nic, &mbx)) {
822 device_printf(nic->dev,
823 "PF didn't respond to READY msg\n");
/* Store one BGX RX or TX statistic delivered by the PF into bgx_stats. */
831 nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
835 nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
837 nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
/*
 * Mailbox interrupt service: read the message words out of the mailbox
 * CSRs and dispatch on the message type. READY carries VF configuration
 * (vf_id, tns mode, node, MAC); ACK/NACK complete a pending
 * nicvf_send_msg_to_pf(); BGX_STATS delivers a statistic; LINK_CHANGE
 * updates link state/speed/duplex and notifies the network stack.
 */
841 nicvf_handle_mbx_intr(struct nicvf *nic)
843 union nic_mbx mbx = {};
848 mbx_addr = NIC_VF_PF_MAILBOX_0_1;
849 mbx_data = (uint64_t *)&mbx;
851 for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
852 *mbx_data = nicvf_reg_read(nic, mbx_addr);
854 mbx_addr += sizeof(uint64_t);
857 switch (mbx.msg.msg) {
858 case NIC_MBOX_MSG_READY:
859 nic->pf_acked = TRUE;
860 nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
861 nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
862 nic->node = mbx.nic_cfg.node_id;
863 memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
864 nic->loopback_supported = mbx.nic_cfg.loopback_supported;
865 nic->link_up = FALSE;
869 case NIC_MBOX_MSG_ACK:
870 nic->pf_acked = TRUE;
872 case NIC_MBOX_MSG_NACK:
873 nic->pf_nacked = TRUE;
875 case NIC_MBOX_MSG_BGX_STATS:
876 nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
877 nic->pf_acked = TRUE;
879 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
880 nic->pf_acked = TRUE;
881 nic->link_up = mbx.link_status.link_up;
882 nic->duplex = mbx.link_status.duplex;
883 nic->speed = mbx.link_status.speed;
/* Speed is reported in Mbps; baudrate expects bits per second. */
885 if_setbaudrate(nic->ifp, nic->speed * 1000000);
886 if_link_state_change(nic->ifp, LINK_STATE_UP);
888 if_setbaudrate(nic->ifp, 0);
889 if_link_state_change(nic->ifp, LINK_STATE_DOWN);
893 device_printf(nic->dev,
894 "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
/* Always clear the mailbox interrupt before returning. */
897 nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
/* Ask the PF (via mailbox) to program this VF's MAC address. */
901 nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
903 union nic_mbx mbx = {};
905 mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
906 mbx.mac.vf_id = nic->vf_id;
907 memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);
909 return (nicvf_send_msg_to_pf(nic, &mbx));
/* Configure the CPI (channel parse index) algorithm via the PF mailbox. */
913 nicvf_config_cpi(struct nicvf *nic)
915 union nic_mbx mbx = {};
917 mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
918 mbx.cpi_cfg.vf_id = nic->vf_id;
919 mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
920 mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
922 nicvf_send_msg_to_pf(nic, &mbx);
/*
 * Enable the hardware queue set, configure the data-transfer queues,
 * then tell the PF configuration is complete (CFG_DONE).
 */
926 nicvf_init_resources(struct nicvf *nic)
929 union nic_mbx mbx = {};
931 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
934 nicvf_qset_config(nic, TRUE);
936 /* Initialize queues and HW for data transfer */
937 err = nicvf_config_data_transfer(nic, TRUE);
939 device_printf(nic->dev,
940 "Failed to alloc/config VF's QSet resources\n");
944 /* Send VF config done msg to PF */
945 nicvf_write_to_mbx(nic, &mbx);
/*
 * Misc (mailbox) interrupt filter: ignore spurious interrupts, otherwise
 * process the pending PF->VF mailbox message.
 */
951 nicvf_misc_intr_handler(void *arg)
953 struct nicvf *nic = (struct nicvf *)arg;
956 intr = nicvf_reg_read(nic, NIC_VF_INT);
957 /* Check for spurious interrupt */
958 if (!(intr & NICVF_INTR_MBOX_MASK))
961 nicvf_handle_mbx_intr(nic);
/*
 * Completion-queue interrupt filter: mask the CQ interrupt and defer the
 * actual processing to the queue's taskqueue.
 */
965 nicvf_intr_handler(void *arg)
968 struct cmp_queue *cq;
971 cq = (struct cmp_queue *)arg;
975 /* Disable interrupts */
976 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
978 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
980 /* Clear interrupt */
981 nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
983 return (FILTER_HANDLED);
/*
 * RBDR interrupt filter: for every enabled RBDR, mask its interrupt and
 * schedule the refill task.
 */
987 nicvf_rbdr_intr_handler(void *arg)
990 struct queue_set *qs;
994 nic = (struct nicvf *)arg;
996 /* Disable RBDR interrupt and schedule softirq */
997 for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
998 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
1000 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1003 rbdr = &qs->rbdr[qidx];
1004 taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
1005 /* Clear interrupt */
1006 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
/*
 * Queue-set error interrupt filter: mask it and hand off to the
 * qs-error task for recovery.
 */
1011 nicvf_qs_err_intr_handler(void *arg)
1013 struct nicvf *nic = (struct nicvf *)arg;
1014 struct queue_set *qs = nic->qs;
1016 /* Disable Qset err interrupt and schedule softirq */
1017 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1018 taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
1019 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
/*
 * Map the MSI-X table BAR and allocate all NIC_VF_MSIX_VECTORS vectors;
 * anything less than the full vector count is treated as failure.
 */
1024 nicvf_enable_msix(struct nicvf *nic)
1026 struct pci_devinfo *dinfo;
1030 dinfo = device_get_ivars(nic->dev);
1031 rid = dinfo->cfg.msix.msix_table_bar;
1032 nic->msix_table_res =
1033 bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1034 if (nic->msix_table_res == NULL) {
1035 device_printf(nic->dev,
1036 "Could not allocate memory for MSI-X table\n");
1040 count = nic->num_vec = NIC_VF_MSIX_VECTORS;
1042 ret = pci_alloc_msix(nic->dev, &count);
1043 if ((ret != 0) || (count != nic->num_vec)) {
1044 device_printf(nic->dev,
1045 "Request for #%d msix vectors failed, error: %d\n",
1050 nic->msix_enabled = 1;
/* Release MSI-X vectors if they were enabled. */
1055 nicvf_disable_msix(struct nicvf *nic)
1058 if (nic->msix_enabled) {
1059 pci_release_msi(nic->dev);
1060 nic->msix_enabled = 0;
/*
 * Tear down every registered interrupt handler, release each IRQ
 * resource, then disable MSI-X.
 */
1066 nicvf_release_all_interrupts(struct nicvf *nic)
1068 struct resource *res;
1072 /* Free registered interrupts */
1073 for (irq = 0; irq < nic->num_vec; irq++) {
1074 res = nic->msix_entries[irq].irq_res;
1077 /* Teardown interrupt first */
1078 if (nic->msix_entries[irq].handle != NULL) {
1079 err = bus_teardown_intr(nic->dev,
1080 nic->msix_entries[irq].irq_res,
1081 nic->msix_entries[irq].handle);
1083 ("ERROR: Unable to teardown interrupt %d", irq));
1084 nic->msix_entries[irq].handle = NULL;
1087 bus_release_resource(nic->dev, SYS_RES_IRQ,
1088 rman_get_rid(res), nic->msix_entries[irq].irq_res);
1089 nic->msix_entries[irq].irq_res = NULL;
1092 nicvf_disable_msix(nic);
1096 * Initialize MSIX vectors and register MISC interrupt.
1097 * Send READY message to PF to check if its alive
1100 nicvf_allocate_misc_interrupt(struct nicvf *nic)
1102 struct resource *res;
1106 /* Return if mailbox interrupt is already registered */
1107 if (nic->msix_enabled)
1111 if (nicvf_enable_msix(nic) != 0)
1114 irq = NICVF_INTR_ID_MISC;
1116 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1117 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1118 if (nic->msix_entries[irq].irq_res == NULL) {
1119 device_printf(nic->dev,
1120 "Could not allocate Mbox interrupt for VF%d\n",
1121 device_get_unit(nic->dev));
/* Mailbox handler runs as a filter (no ithread handler argument). */
1125 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1126 (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
1127 &nic->msix_entries[irq].handle);
/* Setup failed: release the IRQ resource we just allocated. */
1129 res = nic->msix_entries[irq].irq_res;
1130 bus_release_resource(nic->dev, SYS_RES_IRQ,
1131 rman_get_rid(res), res);
1132 nic->msix_entries[irq].irq_res = NULL;
/*
 * Enable the mailbox interrupt and verify the PF answers the READY
 * handshake; on failure the mailbox interrupt is disabled again.
 */
1140 nicvf_enable_misc_interrupt(struct nicvf *nic)
1143 /* Enable mailbox interrupt */
1144 nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
1146 /* Check if VF is able to communicate with PF */
1147 if (!nicvf_check_pf_ready(nic)) {
1148 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
/*
 * Release the data-path interrupts: every CQ vector, every RBDR vector
 * and the QS-error vector. For each one, tear down the handler first
 * (if installed), then release the IRQ resource.
 */
1156 nicvf_release_net_interrupts(struct nicvf *nic)
1158 struct resource *res;
1162 for_each_cq_irq(irq) {
1163 res = nic->msix_entries[irq].irq_res;
1166 /* Teardown active interrupts first */
1167 if (nic->msix_entries[irq].handle != NULL) {
1168 err = bus_teardown_intr(nic->dev,
1169 nic->msix_entries[irq].irq_res,
1170 nic->msix_entries[irq].handle);
1172 ("ERROR: Unable to teardown CQ interrupt %d",
1173 (irq - NICVF_INTR_ID_CQ)));
1178 /* Release resource */
1179 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1181 nic->msix_entries[irq].irq_res = NULL;
1184 for_each_rbdr_irq(irq) {
1185 res = nic->msix_entries[irq].irq_res;
1188 /* Teardown active interrupts first */
1189 if (nic->msix_entries[irq].handle != NULL) {
1190 err = bus_teardown_intr(nic->dev,
1191 nic->msix_entries[irq].irq_res,
1192 nic->msix_entries[irq].handle);
1194 ("ERROR: Unable to teardown RDBR interrupt %d",
1195 (irq - NICVF_INTR_ID_RBDR)));
1200 /* Release resource */
1201 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1203 nic->msix_entries[irq].irq_res = NULL;
/* Finally, the single queue-set error vector. */
1206 irq = NICVF_INTR_ID_QS_ERR;
1207 res = nic->msix_entries[irq].irq_res;
1209 /* Teardown active interrupts first */
1210 if (nic->msix_entries[irq].handle != NULL) {
1211 err = bus_teardown_intr(nic->dev,
1212 nic->msix_entries[irq].irq_res,
1213 nic->msix_entries[irq].handle);
1215 ("ERROR: Unable to teardown QS Error interrupt %d",
1221 /* Release resource */
1222 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1224 nic->msix_entries[irq].irq_res = NULL;
/*
 * Allocate and register the data-path interrupts (one filter per CQ, a
 * shared RBDR filter, and the QS-error filter). Requires MSI-X to be
 * configured already; on any failure, previously registered vectors are
 * released via nicvf_release_net_interrupts().
 */
1229 nicvf_allocate_net_interrupts(struct nicvf *nic)
1235 /* MSI-X must be configured by now */
1236 if (!nic->msix_enabled) {
1237 device_printf(nic->dev, "Cannot alloacte queue interrups. "
1238 "MSI-X interrupts disabled.\n");
1242 /* Register CQ interrupts */
1243 for_each_cq_irq(irq) {
1244 if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
1247 qidx = irq - NICVF_INTR_ID_CQ;
1249 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1250 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1251 if (nic->msix_entries[irq].irq_res == NULL) {
1252 device_printf(nic->dev,
1253 "Could not allocate CQ interrupt %d for VF%d\n",
1254 (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
/* CQ vector: filter routine with the completion queue as its argument. */
1258 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1259 (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
1260 NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
1262 device_printf(nic->dev,
1263 "Could not setup CQ interrupt %d for VF%d\n",
1264 (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
1269 /* Register RBDR interrupt */
1270 for_each_rbdr_irq(irq) {
1271 if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
1275 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1276 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1277 if (nic->msix_entries[irq].irq_res == NULL) {
1278 device_printf(nic->dev,
1279 "Could not allocate RBDR interrupt %d for VF%d\n",
1280 (irq - NICVF_INTR_ID_RBDR),
1281 device_get_unit(nic->dev));
/* RBDR vector: handler scans all RBDRs, so it gets the softc. */
1285 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1286 (INTR_MPSAFE | INTR_TYPE_NET), NULL,
1287 nicvf_rbdr_intr_handler, nic,
1288 &nic->msix_entries[irq].handle);
1290 device_printf(nic->dev,
1291 "Could not setup RBDR interrupt %d for VF%d\n",
1292 (irq - NICVF_INTR_ID_RBDR),
1293 device_get_unit(nic->dev));
1298 /* Register QS error interrupt */
1299 irq = NICVF_INTR_ID_QS_ERR;
1301 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1302 SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1303 if (nic->msix_entries[irq].irq_res == NULL) {
1304 device_printf(nic->dev,
1305 "Could not allocate QS Error interrupt for VF%d\n",
1306 device_get_unit(nic->dev));
1310 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1311 (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
1312 nic, &nic->msix_entries[irq].handle);
1314 device_printf(nic->dev,
1315 "Could not setup QS Error interrupt for VF%d\n",
1316 device_get_unit(nic->dev));
/* Common error path: undo whatever was registered so far. */
1322 nicvf_release_net_interrupts(nic);
/*
 * Stop the interface with the core lock held: drain the stats callout,
 * send SHUTDOWN to the PF, mask/clear RBDR and QS-error interrupts,
 * mark the ifnet down, free queue resources, disable the hardware queue
 * set and finally mask the mailbox interrupt.
 */
1327 nicvf_stop_locked(struct nicvf *nic)
1331 struct queue_set *qs = nic->qs;
1332 union nic_mbx mbx = {};
1334 NICVF_CORE_LOCK_ASSERT(nic);
1335 /* Stop callout. Can block here since holding SX lock */
1336 callout_drain(&nic->stats_callout);
1340 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1341 nicvf_send_msg_to_pf(nic, &mbx);
1343 /* Disable RBDR & QS error interrupts */
1344 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1345 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1346 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1348 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1349 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1351 /* Deactivate network interface */
1352 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
1354 /* Free resources */
1355 nicvf_config_data_transfer(nic, FALSE);
1357 /* Disable HW Qset */
1358 nicvf_qset_config(nic, FALSE);
1360 /* disable mailbox interrupt */
1361 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
/*
 * Snapshot hardware RX/TX counters from the VNIC statistics CSRs into
 * hw_stats, derive the aggregate driver counters (frames ok, drops)
 * and refresh the per-RQ/per-SQ statistics.
 */
1367 nicvf_update_stats(struct nicvf *nic)
1370 struct nicvf_hw_stats *stats = &nic->hw_stats;
1371 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1372 struct queue_set *qs = nic->qs;
/* Each counter register is 8 bytes; reg index selects the CSR offset. */
1374 #define GET_RX_STATS(reg) \
1375 nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
1376 #define GET_TX_STATS(reg) \
1377 nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))
1379 stats->rx_bytes = GET_RX_STATS(RX_OCTS);
1380 stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
1381 stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
1382 stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
1383 stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
1384 stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
1385 stats->rx_drop_red = GET_RX_STATS(RX_RED);
1386 stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
1387 stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
1388 stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
1389 stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
1390 stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
1391 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1392 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1394 stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
1395 stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
1396 stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
1397 stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
1398 stats->tx_drops = GET_TX_STATS(TX_DROP);
/* Derived totals consumed by nicvf_if_getcounter(). */
1400 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1401 stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
1402 drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
1403 drv_stats->tx_drops = stats->tx_drops;
1405 /* Update RQ and SQ stats */
1406 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1407 nicvf_update_rq_stats(nic, qidx);
1408 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1409 nicvf_update_sq_stats(nic, qidx);
/*
 * Periodic (1 Hz) callout: refresh the statistics and reschedule itself.
 * NOTE(review): the definition continues past the end of this view.
 */
1413 nicvf_tick_stats(void *arg)
1417 nic = (struct nicvf *)arg;
1419 /* Read the statistics */
1420 nicvf_update_stats(nic);
1422 callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);