]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/vnic/nicvf_main.c
Improve TX path of the VNIC driver
[FreeBSD/FreeBSD.git] / sys / dev / vnic / nicvf_main.c
1 /*
2  * Copyright (C) 2015 Cavium Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  *
28  */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bitset.h>
38 #include <sys/bitstring.h>
39 #include <sys/bus.h>
40 #include <sys/endian.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/mbuf.h>
44 #include <sys/module.h>
45 #include <sys/rman.h>
46 #include <sys/pciio.h>
47 #include <sys/pcpu.h>
48 #include <sys/proc.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/stdatomic.h>
52 #include <sys/cpuset.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/smp.h>
56 #include <sys/taskqueue.h>
57
58 #include <net/bpf.h>
59 #include <net/ethernet.h>
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_vlan_var.h>
67
68 #include <netinet/in.h>
69 #include <netinet/ip.h>
70 #include <netinet/if_ether.h>
71 #include <netinet/tcp_lro.h>
72
73 #include <dev/pci/pcireg.h>
74 #include <dev/pci/pcivar.h>
75
76 #include <sys/dnv.h>
77 #include <sys/nv.h>
78 #include <sys/iov_schema.h>
79
80 #include <machine/bus.h>
81
82 #include "thunder_bgx.h"
83 #include "nic_reg.h"
84 #include "nic.h"
85 #include "nicvf_queues.h"
86
87 #define VNIC_VF_DEVSTR          "Cavium Thunder NIC Virtual Function Driver"
88
89 #define VNIC_VF_REG_RID         PCIR_BAR(PCI_CFG_REG_BAR_NUM)
90
91 /* Lock for core interface settings */
92 #define NICVF_CORE_LOCK_INIT(nic)                               \
93     sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))
94
95 #define NICVF_CORE_LOCK_DESTROY(nic)                            \
96     sx_destroy(&(nic)->core_sx)
97
98 #define NICVF_CORE_LOCK(nic)            sx_xlock(&(nic)->core_sx)
99 #define NICVF_CORE_UNLOCK(nic)          sx_xunlock(&(nic)->core_sx)
100
101 #define NICVF_CORE_LOCK_ASSERT(nic)     sx_assert(&(nic)->core_sx, SA_XLOCKED)
102
103 #define SPEED_10        10
104 #define SPEED_100       100
105 #define SPEED_1000      1000
106 #define SPEED_10000     10000
107 #define SPEED_40000     40000
108
109 MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");
110
static int nicvf_probe(device_t);
static int nicvf_attach(device_t);
static int nicvf_detach(device_t);

static device_method_t nicvf_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         nicvf_probe),
        DEVMETHOD(device_attach,        nicvf_attach),
        DEVMETHOD(device_detach,        nicvf_detach),

        DEVMETHOD_END,
};

static driver_t nicvf_driver = {
        "vnic",
        nicvf_methods,
        sizeof(struct nicvf),
};

static devclass_t nicvf_devclass;

DRIVER_MODULE(nicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
MODULE_DEPEND(nicvf, pci, 1, 1, 1);
MODULE_DEPEND(nicvf, ether, 1, 1, 1);
MODULE_DEPEND(nicvf, vnic_pf, 1, 1, 1);

/* Interrupt allocation and PF communication helpers */
static int nicvf_allocate_misc_interrupt(struct nicvf *);
static int nicvf_enable_misc_interrupt(struct nicvf *);
static int nicvf_allocate_net_interrupts(struct nicvf *);
static void nicvf_release_all_interrupts(struct nicvf *);
static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
static void nicvf_config_cpi(struct nicvf *);
static int nicvf_init_resources(struct nicvf *);

/* ifnet and ifmedia setup helpers */
static int nicvf_setup_ifnet(struct nicvf *);
static int nicvf_setup_ifmedia(struct nicvf *);
static void nicvf_hw_addr_random(uint8_t *);

/* ifnet callbacks */
static int nicvf_if_ioctl(struct ifnet *, u_long, caddr_t);
static void nicvf_if_init(void *);
static void nicvf_if_init_locked(struct nicvf *);
static int nicvf_if_transmit(struct ifnet *, struct mbuf *);
static void nicvf_if_qflush(struct ifnet *);
static uint64_t nicvf_if_getcounter(struct ifnet *, ift_counter);

static int nicvf_stop_locked(struct nicvf *);

/* ifmedia callbacks */
static void nicvf_media_status(struct ifnet *, struct ifmediareq *);
static int nicvf_media_change(struct ifnet *);

/* Periodic statistics callout */
static void nicvf_tick_stats(void *);
163 static int
164 nicvf_probe(device_t dev)
165 {
166         uint16_t vendor_id;
167         uint16_t device_id;
168
169         vendor_id = pci_get_vendor(dev);
170         device_id = pci_get_device(dev);
171
172         if (vendor_id != PCI_VENDOR_ID_CAVIUM)
173                 return (ENXIO);
174
175         if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF ||
176             device_id == PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) {
177                 device_set_desc(dev, VNIC_VF_DEVSTR);
178                 return (BUS_PROBE_DEFAULT);
179         }
180
181         return (ENXIO);
182 }
183
184 static int
185 nicvf_attach(device_t dev)
186 {
187         struct nicvf *nic;
188         int rid, qcount;
189         int err = 0;
190         uint8_t hwaddr[ETHER_ADDR_LEN];
191         uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};
192
193         nic = device_get_softc(dev);
194         nic->dev = dev;
195         nic->pnicvf = nic;
196
197         NICVF_CORE_LOCK_INIT(nic);
198         /* Enable HW TSO on Pass2 */
199         if (!pass1_silicon(dev))
200                 nic->hw_tso = TRUE;
201
202         rid = VNIC_VF_REG_RID;
203         nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
204             RF_ACTIVE);
205         if (nic->reg_base == NULL) {
206                 device_printf(dev, "Could not allocate registers memory\n");
207                 return (ENXIO);
208         }
209
210         qcount = MAX_CMP_QUEUES_PER_QS;
211         nic->max_queues = qcount;
212
213         err = nicvf_set_qset_resources(nic);
214         if (err != 0)
215                 goto err_free_res;
216
217         /* Check if PF is alive and get MAC address for this VF */
218         err = nicvf_allocate_misc_interrupt(nic);
219         if (err != 0)
220                 goto err_free_res;
221
222         NICVF_CORE_LOCK(nic);
223         err = nicvf_enable_misc_interrupt(nic);
224         NICVF_CORE_UNLOCK(nic);
225         if (err != 0)
226                 goto err_release_intr;
227
228         err = nicvf_allocate_net_interrupts(nic);
229         if (err != 0) {
230                 device_printf(dev,
231                     "Could not allocate network interface interrupts\n");
232                 goto err_free_ifnet;
233         }
234
235         /* If no MAC address was obtained we generate random one */
236         if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
237                 nicvf_hw_addr_random(hwaddr);
238                 memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
239                 NICVF_CORE_LOCK(nic);
240                 nicvf_hw_set_mac_addr(nic, hwaddr);
241                 NICVF_CORE_UNLOCK(nic);
242         }
243
244         /* Configure CPI alorithm */
245         nic->cpi_alg = CPI_ALG_NONE;
246         NICVF_CORE_LOCK(nic);
247         nicvf_config_cpi(nic);
248         NICVF_CORE_UNLOCK(nic);
249
250         err = nicvf_setup_ifnet(nic);
251         if (err != 0) {
252                 device_printf(dev, "Could not set-up ifnet\n");
253                 goto err_release_intr;
254         }
255
256         err = nicvf_setup_ifmedia(nic);
257         if (err != 0) {
258                 device_printf(dev, "Could not set-up ifmedia\n");
259                 goto err_free_ifnet;
260         }
261
262         mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
263         callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);
264
265         ether_ifattach(nic->ifp, nic->hwaddr);
266
267         return (0);
268
269 err_free_ifnet:
270         if_free(nic->ifp);
271 err_release_intr:
272         nicvf_release_all_interrupts(nic);
273 err_free_res:
274         bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
275             nic->reg_base);
276
277         return (err);
278 }
279
/*
 * Device detach: stop the port, release interrupts and bus resources,
 * tear down ifmedia and free the ifnet, then destroy the core lock.
 */
static int
nicvf_detach(device_t dev)
{
        struct nicvf *nic;

        nic = device_get_softc(dev);

        NICVF_CORE_LOCK(nic);
        /* Shut down the port and release ring resources */
        nicvf_stop_locked(nic);
        /* Release stats lock */
        /*
         * NOTE(review): the stats callout armed in nicvf_if_init_locked()
         * is not visibly drained in this chunk — confirm that
         * nicvf_stop_locked() stops it before stats_mtx is destroyed.
         */
        mtx_destroy(&nic->stats_mtx);
        /* Release interrupts */
        nicvf_release_all_interrupts(nic);
        /* Release memory resource */
        if (nic->reg_base != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rman_get_rid(nic->reg_base), nic->reg_base);
        }

        /* Remove all ifmedia configurations */
        ifmedia_removeall(&nic->if_media);
        /* Free this ifnet */
        /*
         * NOTE(review): no ether_ifdetach() call is visible in this file
         * chunk before if_free() — verify the detach ordering against
         * ifnet(9) expectations.
         */
        if_free(nic->ifp);
        NICVF_CORE_UNLOCK(nic);
        /* Finally destroy the lock */
        NICVF_CORE_LOCK_DESTROY(nic);

        return (0);
}
310
311 static void
312 nicvf_hw_addr_random(uint8_t *hwaddr)
313 {
314         uint32_t rnd;
315         uint8_t addr[ETHER_ADDR_LEN];
316
317         /*
318          * Create randomized MAC address.
319          * Set 'bsd' + random 24 low-order bits.
320          */
321         rnd = arc4random() & 0x00ffffff;
322         addr[0] = 'b';
323         addr[1] = 's';
324         addr[2] = 'd';
325         addr[3] = rnd >> 16;
326         addr[4] = rnd >> 8;
327         addr[5] = rnd >> 0;
328
329         memcpy(hwaddr, addr, ETHER_ADDR_LEN);
330 }
331
/*
 * Allocate and configure the ifnet: install driver callbacks, set the
 * default MTU and advertise capabilities (VLAN MTU, LRO, TSO4 when the
 * silicon supports it, HW checksums) plus the matching hwassist bits.
 * Returns 0 on success or ENOMEM if the ifnet cannot be allocated.
 */
static int
nicvf_setup_ifnet(struct nicvf *nic)
{
        struct ifnet *ifp;

        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(nic->dev, "Could not allocate ifnet structure\n");
                return (ENOMEM);
        }

        nic->ifp = ifp;

        if_setsoftc(ifp, nic);
        if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
        if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX);

        /* Install the driver's ifnet callbacks */
        if_settransmitfn(ifp, nicvf_if_transmit);
        if_setqflushfn(ifp, nicvf_if_qflush);
        if_setioctlfn(ifp, nicvf_if_ioctl);
        if_setinitfn(ifp, nicvf_if_init);
        if_setgetcounterfn(ifp, nicvf_if_getcounter);

        if_setmtu(ifp, ETHERMTU);

        /* Reset caps */
        if_setcapabilities(ifp, 0);

        /* Set the default values */
        if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
        if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
        if (nic->hw_tso) {
                /* TSO (hw_tso is only set on non-Pass1 silicon at attach) */
                if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
                /* TSO parameters */
                ifp->if_hw_tsomax = NICVF_TSO_MAXSIZE;
                ifp->if_hw_tsomaxsegcount = NICVF_TSO_NSEGS;
                ifp->if_hw_tsomaxsegsize = MCLBYTES;
        }
        /* IP/TCP/UDP HW checksums */
        if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
        if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
        /*
         * HW offload enable
         */
        if_clearhwassist(ifp);
        if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP), 0);
        if (nic->hw_tso)
                if_sethwassistbits(ifp, (CSUM_TSO), 0);
        /* Enable everything that was advertised above by default */
        if_setcapenable(ifp, if_getcapabilities(ifp));

        return (0);
}
385
386 static int
387 nicvf_setup_ifmedia(struct nicvf *nic)
388 {
389
390         ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
391             nicvf_media_status);
392
393         /*
394          * Advertise availability of all possible connection types,
395          * even though not all are possible at the same time.
396          */
397
398         ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
399             0, NULL);
400         ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
401             0, NULL);
402         ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
403             0, NULL);
404         ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
405             0, NULL);
406         ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
407             0, NULL);
408         ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
409             0, NULL);
410
411         ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));
412
413         return (0);
414 }
415
/*
 * ifnet ioctl handler: address assignment, MTU, interface flags,
 * multicast, media and capability changes.  Everything not handled
 * here falls through to ether_ioctl().
 */
static int
nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct nicvf *nic;
        struct rcv_queue *rq;
        struct ifreq *ifr;
        uint32_t flags;
        int mask, err;
        int rq_idx;
#if defined(INET) || defined(INET6)
        struct ifaddr *ifa;
        boolean_t avoid_reset = FALSE;
#endif

        nic = if_getsoftc(ifp);
        ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
        ifa = (struct ifaddr *)data;
#endif
        err = 0;
        switch (cmd) {
        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif

#if defined(INET) || defined(INET6)
                /* Avoid reinitialization unless it's necessary */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
                                nicvf_if_init(nic);
#ifdef INET
                        if (!(if_getflags(ifp) & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
#endif

                        return (0);
                }
#endif
                err = ether_ioctl(ifp, cmd, data);
                break;
        case SIOCSIFMTU:
                /*
                 * ARM64TODO: Needs to be implemented.
                 * Currently ETHERMTU is set by default.
                 */
                err = ether_ioctl(ifp, cmd, data);
                break;
        case SIOCSIFFLAGS:
                NICVF_CORE_LOCK(nic);
                if (if_getflags(ifp) & IFF_UP) {
                        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                                /*
                                 * NOTE(review): `flags` holds the toggled
                                 * bits but is never used below; the PROMISC
                                 * test ANDs old and new flags while the
                                 * ALLMULTI test XORs them.  Both bodies are
                                 * compiled out (#if 0), so behavior is
                                 * unaffected today — confirm the intended
                                 * condition before enabling them.
                                 */
                                flags = ifp->if_flags ^ nic->if_flags;
                                if ((nic->if_flags & ifp->if_flags) &
                                    IFF_PROMISC) {
                                        /* Change promiscous mode */
#if 0
                                        /* ARM64TODO */
                                        nicvf_set_promiscous(nic);
#endif
                                }

                                if ((nic->if_flags ^ ifp->if_flags) &
                                    IFF_ALLMULTI) {
                                        /* Change multicasting settings */
#if 0
                                        /* ARM64TODO */
                                        nicvf_set_multicast(nic);
#endif
                                }
                        } else {
                                /* Marked up but not running yet: start it */
                                nicvf_if_init_locked(nic);
                        }
                } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                        nicvf_stop_locked(nic);

                /* Remember the flags for the next delta computation */
                nic->if_flags = ifp->if_flags;
                NICVF_CORE_UNLOCK(nic);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
#if 0
                        NICVF_CORE_LOCK(nic);
                        /* ARM64TODO */
                        nicvf_set_multicast(nic);
                        NICVF_CORE_UNLOCK(nic);
#endif
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
                break;

        case SIOCSIFCAP:
                /* mask holds only the capability bits being toggled */
                mask = ifp->if_capenable ^ ifr->ifr_reqcap;
                if (mask & IFCAP_VLAN_MTU) {
                        /* No work to do except acknowledge the change took. */
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;
                }
                if (mask & IFCAP_TXCSUM)
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if ((mask & IFCAP_TSO4) && nic->hw_tso)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_LRO) {
                        /*
                         * Lock the driver for a moment to avoid
                         * mismatch in per-queue settings.
                         */
                        NICVF_CORE_LOCK(nic);
                        ifp->if_capenable ^= IFCAP_LRO;
                        if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
                                /*
                                 * Now disable LRO for subsequent packets.
                                 * Atomicity of this change is not necessary
                                 * as we don't need precise toggle of this
                                 * feature for all threads processing the
                                 * completion queue.
                                 */
                                for (rq_idx = 0;
                                    rq_idx < nic->qs->rq_cnt; rq_idx++) {
                                        rq = &nic->qs->rq[rq_idx];
                                        rq->lro_enabled = !rq->lro_enabled;
                                }
                        }
                        NICVF_CORE_UNLOCK(nic);
                }

                break;

        default:
                err = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (err);
}
564
/*
 * Bring the interface up.  Must be called with the core lock held.
 * Stops the port if it is already running, re-syncs the MAC address
 * with the lladdr, (re)initializes the queue set and enables the
 * data-path interrupts before marking the ifnet running.
 */
static void
nicvf_if_init_locked(struct nicvf *nic)
{
        struct queue_set *qs = nic->qs;
        struct ifnet *ifp;
        int qidx;
        int err;
        caddr_t if_addr;

        NICVF_CORE_LOCK_ASSERT(nic);
        ifp = nic->ifp;

        /* Restart from a clean state if already running */
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                nicvf_stop_locked(nic);

        err = nicvf_enable_misc_interrupt(nic);
        if (err != 0) {
                if_printf(ifp, "Could not reenable Mbox interrupt\n");
                return;
        }

        /* Get the latest MAC address */
        if_addr = if_getlladdr(ifp);
        /* Update MAC address if changed */
        if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
                memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
                nicvf_hw_set_mac_addr(nic, if_addr);
        }

        /* Initialize the queues */
        err = nicvf_init_resources(nic);
        if (err != 0)
                goto error;

        /* Make sure queue initialization is written */
        wmb();

        /* Write all-ones to NIC_VF_INT (presumably W1C — clears pending) */
        nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
        /* Enable Qset err interrupt */
        nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

        /* Enable completion queue interrupt */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

        /* Enable RBDR threshold interrupt */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

        /* Reset TX-queue flow-control counters */
        nic->drv_stats.txq_stop = 0;
        nic->drv_stats.txq_wake = 0;

        /* Activate network interface */
        if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

        /* Schedule callout to update stats */
        callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);

        return;

error:
        /* Something went very wrong. Disable this ifnet for good */
        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
}
629
/*
 * ifnet if_init callback: take the core lock and run the locked
 * initialization routine.
 */
static void
nicvf_if_init(void *if_softc)
{
	struct nicvf *nic;

	nic = if_softc;
	NICVF_CORE_LOCK(nic);
	nicvf_if_init_locked(nic);
	NICVF_CORE_UNLOCK(nic);
}
639
/*
 * ifnet if_transmit callback.  Selects a send queue (by flowid when the
 * mbuf carries one, otherwise by current CPU), enqueues the mbuf on the
 * queue's buf_ring and drains the ring inline when the TX lock is free,
 * deferring to the queue's taskqueue otherwise.
 */
static int
nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
{
        struct nicvf *nic = if_getsoftc(ifp);
        struct queue_set *qs = nic->qs;
        struct snd_queue *sq;
        struct mbuf *mtmp;
        int qidx;
        int err = 0;


        if (__predict_false(qs == NULL)) {
                panic("%s: missing queue set for %s", __func__,
                    device_get_nameunit(nic->dev));
        }

        /* Select queue */
        if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
                qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
        else
                qidx = curcpu % qs->sq_cnt;

        sq = &qs->sq[qidx];

        /* Interface not fully running, or link down: just queue the mbuf */
        if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING) || !nic->link_up) {
                err = drbr_enqueue(ifp, sq->br, mbuf);
                return (err);
        }

        /*
         * Multi-fragment mbufs with checksum offload requested get
         * duplicated when not writable — presumably because the TX path
         * modifies headers in place for the offload; confirm against
         * nicvf_queues.c.
         */
        if (mbuf->m_next != NULL &&
            (mbuf->m_pkthdr.csum_flags &
            (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
                if (M_WRITABLE(mbuf) == 0) {
                        mtmp = m_dup(mbuf, M_NOWAIT);
                        m_freem(mbuf);
                        if (mtmp == NULL)
                                return (ENOBUFS);
                        mbuf = mtmp;
                }
        }

        err = drbr_enqueue(ifp, sq->br, mbuf);
        if (err != 0)
                return (err);

        /* Drain the ring now if possible, otherwise defer to the taskqueue */
        if (NICVF_TX_TRYLOCK(sq) != 0) {
                err = nicvf_xmit_locked(sq);
                NICVF_TX_UNLOCK(sq);
                return (err);
        } else
                taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

        return (0);
}
695
696 static void
697 nicvf_if_qflush(struct ifnet *ifp)
698 {
699         struct nicvf *nic;
700         struct queue_set *qs;
701         struct snd_queue *sq;
702         struct mbuf *mbuf;
703         size_t idx;
704
705         nic = if_getsoftc(ifp);
706         qs = nic->qs;
707
708         for (idx = 0; idx < qs->sq_cnt; idx++) {
709                 sq = &qs->sq[idx];
710                 NICVF_TX_LOCK(sq);
711                 while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL)
712                         m_freem(mbuf);
713                 NICVF_TX_UNLOCK(sq);
714         }
715         if_qflush(ifp);
716 }
717
718 static uint64_t
719 nicvf_if_getcounter(struct ifnet *ifp, ift_counter cnt)
720 {
721         struct nicvf *nic;
722         struct nicvf_hw_stats *hw_stats;
723         struct nicvf_drv_stats *drv_stats;
724
725         nic = if_getsoftc(ifp);
726         hw_stats = &nic->hw_stats;
727         drv_stats = &nic->drv_stats;
728
729         switch (cnt) {
730         case IFCOUNTER_IPACKETS:
731                 return (drv_stats->rx_frames_ok);
732         case IFCOUNTER_OPACKETS:
733                 return (drv_stats->tx_frames_ok);
734         case IFCOUNTER_IBYTES:
735                 return (hw_stats->rx_bytes);
736         case IFCOUNTER_OBYTES:
737                 return (hw_stats->tx_bytes_ok);
738         case IFCOUNTER_IMCASTS:
739                 return (hw_stats->rx_mcast_frames);
740         case IFCOUNTER_COLLISIONS:
741                 return (0);
742         case IFCOUNTER_IQDROPS:
743                 return (drv_stats->rx_drops);
744         case IFCOUNTER_OQDROPS:
745                 return (drv_stats->tx_drops);
746         default:
747                 return (if_get_counter_default(ifp, cnt));
748         }
749
750 }
751
752 static void
753 nicvf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
754 {
755         struct nicvf *nic = if_getsoftc(ifp);
756
757         NICVF_CORE_LOCK(nic);
758
759         ifmr->ifm_status = IFM_AVALID;
760         ifmr->ifm_active = IFM_ETHER;
761
762         if (nic->link_up) {
763                 /* Device attached to working network */
764                 ifmr->ifm_status |= IFM_ACTIVE;
765         }
766
767         switch (nic->speed) {
768         case SPEED_10:
769                 ifmr->ifm_active |= IFM_10_T;
770                 break;
771         case SPEED_100:
772                 ifmr->ifm_active |= IFM_100_TX;
773                 break;
774         case SPEED_1000:
775                 ifmr->ifm_active |= IFM_1000_T;
776                 break;
777         case SPEED_10000:
778                 ifmr->ifm_active |= IFM_10G_SR;
779                 break;
780         case SPEED_40000:
781                 ifmr->ifm_active |= IFM_40G_CR4;
782                 break;
783         default:
784                 ifmr->ifm_active |= IFM_AUTO;
785                 break;
786         }
787
788         if (nic->duplex)
789                 ifmr->ifm_active |= IFM_FDX;
790         else
791                 ifmr->ifm_active |= IFM_HDX;
792
793         NICVF_CORE_UNLOCK(nic);
794 }
795
796 static int
797 nicvf_media_change(struct ifnet *ifp __unused)
798 {
799
800         return (0);
801 }
802
/* Register read/write APIs */

/*
 * Write a 64-bit value to a VF CSR at the given offset.
 * NOTE(review): the offset parameter is typed bus_space_handle_t while
 * nicvf_reg_read() takes uint64_t — confirm whether this asymmetry is
 * intentional before touching the shared prototypes.
 */
void
nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
{

        bus_write_8(nic->reg_base, offset, val);
}
810
/* Read a 64-bit value from a VF CSR at the given offset. */
uint64_t
nicvf_reg_read(struct nicvf *nic, uint64_t offset)
{

        return (bus_read_8(nic->reg_base, offset));
}
817
/*
 * Write a 64-bit value to a per-queue CSR: the queue index is folded
 * into the register offset via NIC_Q_NUM_SHIFT.
 */
void
nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx, uint64_t val)
{

        bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
}
825
/*
 * Read a 64-bit value from a per-queue CSR: the queue index is folded
 * into the register offset via NIC_Q_NUM_SHIFT.
 */
uint64_t
nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx)
{

        return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
}
833
834 /* VF -> PF mailbox communication */
835 static void
836 nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
837 {
838         uint64_t *msg = (uint64_t *)mbx;
839
840         nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
841         nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
842 }
843
/*
 * Send a message to the PF over the mailbox and busy-wait for the
 * ACK/NACK flags that the mailbox interrupt handler sets.
 * Returns 0 on ACK, EINVAL on NACK, EBUSY on timeout.
 * Must be called with the core lock held.
 */
int
nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
        /*
         * NOTE(review): each loop iteration delays `sleep` ms and
         * decrements `timeout` by `sleep`, so the total wait is
         * NIC_MBOX_MSG_TIMEOUT * 10 milliseconds — confirm this matches
         * the "timeout 2sec" comment below.
         */
        int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
        int sleep = 2;

        NICVF_CORE_LOCK_ASSERT(nic);

        /* Reset handshake flags before posting the new message */
        nic->pf_acked = FALSE;
        nic->pf_nacked = FALSE;

        nicvf_write_to_mbx(nic, mbx);

        /* Wait for previous message to be acked, timeout 2sec */
        while (!nic->pf_acked) {
                if (nic->pf_nacked)
                        return (EINVAL);

                DELAY(sleep * 1000);

                if (nic->pf_acked)
                        break;
                timeout -= sleep;
                if (!timeout) {
                        device_printf(nic->dev,
                                   "PF didn't ack to mbox msg %d from VF%d\n",
                                   (mbx->msg.msg & 0xFF), nic->vf_id);

                        return (EBUSY);
                }
        }
        return (0);
}
877
878 /*
879  * Checks if VF is able to comminicate with PF
880  * and also gets the VNIC number this VF is associated to.
881  */
882 static int
883 nicvf_check_pf_ready(struct nicvf *nic)
884 {
885         union nic_mbx mbx = {};
886
887         mbx.msg.msg = NIC_MBOX_MSG_READY;
888         if (nicvf_send_msg_to_pf(nic, &mbx)) {
889                 device_printf(nic->dev,
890                            "PF didn't respond to READY msg\n");
891                 return 0;
892         }
893
894         return 1;
895 }
896
/*
 * Store a BGX statistics counter reported by the PF into the RX or TX
 * slot selected by the message's direction flag and index.
 */
static void
nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{

        if (bgx->rx)
                nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
        else
                nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}
906
/*
 * Mailbox interrupt handler: copy the mailbox registers into a local
 * message union and act on the PF's message (initial VF configuration,
 * ACK/NACK of a pending request, BGX statistics, link state change).
 */
static void
nicvf_handle_mbx_intr(struct nicvf *nic)
{
        union nic_mbx mbx = {};
        uint64_t *mbx_data;
        uint64_t mbx_addr;
        int i;

        mbx_addr = NIC_VF_PF_MAILBOX_0_1;
        mbx_data = (uint64_t *)&mbx;

        /* Read the whole mailbox, one 64-bit register at a time */
        for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
                *mbx_data = nicvf_reg_read(nic, mbx_addr);
                mbx_data++;
                mbx_addr += sizeof(uint64_t);
        }

        switch (mbx.msg.msg) {
        case NIC_MBOX_MSG_READY:
                /* PF answered the READY probe with this VF's configuration */
                nic->pf_acked = TRUE;
                nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
                nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
                nic->node = mbx.nic_cfg.node_id;
                memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
                nic->loopback_supported = mbx.nic_cfg.loopback_supported;
                nic->link_up = FALSE;
                nic->duplex = 0;
                nic->speed = 0;
                break;
        case NIC_MBOX_MSG_ACK:
                nic->pf_acked = TRUE;
                break;
        case NIC_MBOX_MSG_NACK:
                nic->pf_nacked = TRUE;
                break;
        case NIC_MBOX_MSG_BGX_STATS:
                nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
                nic->pf_acked = TRUE;
                break;
        case NIC_MBOX_MSG_BGX_LINK_CHANGE:
                /* Propagate PF-reported link state to the ifnet layer */
                nic->pf_acked = TRUE;
                nic->link_up = mbx.link_status.link_up;
                nic->duplex = mbx.link_status.duplex;
                nic->speed = mbx.link_status.speed;
                if (nic->link_up) {
                        /* speed is in Mbps, if_baudrate expects bps */
                        if_setbaudrate(nic->ifp, nic->speed * 1000000);
                        if_link_state_change(nic->ifp, LINK_STATE_UP);
                } else {
                        if_setbaudrate(nic->ifp, 0);
                        if_link_state_change(nic->ifp, LINK_STATE_DOWN);
                }
                break;
        default:
                device_printf(nic->dev,
                           "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
                break;
        }
        /* Acknowledge the mailbox interrupt */
        nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
966
967 static int
968 nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
969 {
970         union nic_mbx mbx = {};
971
972         mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
973         mbx.mac.vf_id = nic->vf_id;
974         memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);
975
976         return (nicvf_send_msg_to_pf(nic, &mbx));
977 }
978
979 static void
980 nicvf_config_cpi(struct nicvf *nic)
981 {
982         union nic_mbx mbx = {};
983
984         mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
985         mbx.cpi_cfg.vf_id = nic->vf_id;
986         mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
987         mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
988
989         nicvf_send_msg_to_pf(nic, &mbx);
990 }
991
992 static int
993 nicvf_init_resources(struct nicvf *nic)
994 {
995         int err;
996         union nic_mbx mbx = {};
997
998         mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
999
1000         /* Enable Qset */
1001         nicvf_qset_config(nic, TRUE);
1002
1003         /* Initialize queues and HW for data transfer */
1004         err = nicvf_config_data_transfer(nic, TRUE);
1005         if (err) {
1006                 device_printf(nic->dev,
1007                     "Failed to alloc/config VF's QSet resources\n");
1008                 return (err);
1009         }
1010
1011         /* Send VF config done msg to PF */
1012         nicvf_write_to_mbx(nic, &mbx);
1013
1014         return (0);
1015 }
1016
1017 static void
1018 nicvf_misc_intr_handler(void *arg)
1019 {
1020         struct nicvf *nic = (struct nicvf *)arg;
1021         uint64_t intr;
1022
1023         intr = nicvf_reg_read(nic, NIC_VF_INT);
1024         /* Check for spurious interrupt */
1025         if (!(intr & NICVF_INTR_MBOX_MASK))
1026                 return;
1027
1028         nicvf_handle_mbx_intr(nic);
1029 }
1030
1031 static int
1032 nicvf_intr_handler(void *arg)
1033 {
1034         struct nicvf *nic;
1035         struct cmp_queue *cq;
1036         int qidx;
1037
1038         cq = (struct cmp_queue *)arg;
1039         nic = cq->nic;
1040         qidx = cq->idx;
1041
1042         /* Disable interrupts */
1043         nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
1044
1045         taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
1046
1047         /* Clear interrupt */
1048         nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
1049
1050         return (FILTER_HANDLED);
1051 }
1052
1053 static void
1054 nicvf_rbdr_intr_handler(void *arg)
1055 {
1056         struct nicvf *nic;
1057         struct queue_set *qs;
1058         struct rbdr *rbdr;
1059         int qidx;
1060
1061         nic = (struct nicvf *)arg;
1062
1063         /* Disable RBDR interrupt and schedule softirq */
1064         for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
1065                 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
1066                         continue;
1067                 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1068
1069                 qs = nic->qs;
1070                 rbdr = &qs->rbdr[qidx];
1071                 taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
1072                 /* Clear interrupt */
1073                 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1074         }
1075 }
1076
1077 static void
1078 nicvf_qs_err_intr_handler(void *arg)
1079 {
1080         struct nicvf *nic = (struct nicvf *)arg;
1081         struct queue_set *qs = nic->qs;
1082
1083         /* Disable Qset err interrupt and schedule softirq */
1084         nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1085         taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
1086         nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1087
1088 }
1089
1090 static int
1091 nicvf_enable_msix(struct nicvf *nic)
1092 {
1093         struct pci_devinfo *dinfo;
1094         int rid, count;
1095         int ret;
1096
1097         dinfo = device_get_ivars(nic->dev);
1098         rid = dinfo->cfg.msix.msix_table_bar;
1099         nic->msix_table_res =
1100             bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1101         if (nic->msix_table_res == NULL) {
1102                 device_printf(nic->dev,
1103                     "Could not allocate memory for MSI-X table\n");
1104                 return (ENXIO);
1105         }
1106
1107         count = nic->num_vec = NIC_VF_MSIX_VECTORS;
1108
1109         ret = pci_alloc_msix(nic->dev, &count);
1110         if ((ret != 0) || (count != nic->num_vec)) {
1111                 device_printf(nic->dev,
1112                     "Request for #%d msix vectors failed, error: %d\n",
1113                     nic->num_vec, ret);
1114                 return (ret);
1115         }
1116
1117         nic->msix_enabled = 1;
1118         return (0);
1119 }
1120
1121 static void
1122 nicvf_disable_msix(struct nicvf *nic)
1123 {
1124
1125         if (nic->msix_enabled) {
1126                 pci_release_msi(nic->dev);
1127                 nic->msix_enabled = 0;
1128                 nic->num_vec = 0;
1129         }
1130 }
1131
1132 static void
1133 nicvf_release_all_interrupts(struct nicvf *nic)
1134 {
1135         struct resource *res;
1136         int irq;
1137         int err;
1138
1139         /* Free registered interrupts */
1140         for (irq = 0; irq < nic->num_vec; irq++) {
1141                 res = nic->msix_entries[irq].irq_res;
1142                 if (res == NULL)
1143                         continue;
1144                 /* Teardown interrupt first */
1145                 if (nic->msix_entries[irq].handle != NULL) {
1146                         err = bus_teardown_intr(nic->dev,
1147                             nic->msix_entries[irq].irq_res,
1148                             nic->msix_entries[irq].handle);
1149                         KASSERT(err == 0,
1150                             ("ERROR: Unable to teardown interrupt %d", irq));
1151                         nic->msix_entries[irq].handle = NULL;
1152                 }
1153
1154                 bus_release_resource(nic->dev, SYS_RES_IRQ,
1155                             rman_get_rid(res), nic->msix_entries[irq].irq_res);
1156                 nic->msix_entries[irq].irq_res = NULL;
1157         }
1158         /* Disable MSI-X */
1159         nicvf_disable_msix(nic);
1160 }
1161
1162 /*
1163  * Initialize MSIX vectors and register MISC interrupt.
1164  * Send READY message to PF to check if its alive
1165  */
1166 static int
1167 nicvf_allocate_misc_interrupt(struct nicvf *nic)
1168 {
1169         struct resource *res;
1170         int irq, rid;
1171         int ret = 0;
1172
1173         /* Return if mailbox interrupt is already registered */
1174         if (nic->msix_enabled)
1175                 return (0);
1176
1177         /* Enable MSI-X */
1178         if (nicvf_enable_msix(nic) != 0)
1179                 return (ENXIO);
1180
1181         irq = NICVF_INTR_ID_MISC;
1182         rid = irq + 1;
1183         nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1184             SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1185         if (nic->msix_entries[irq].irq_res == NULL) {
1186                 device_printf(nic->dev,
1187                     "Could not allocate Mbox interrupt for VF%d\n",
1188                     device_get_unit(nic->dev));
1189                 return (ENXIO);
1190         }
1191
1192         ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1193             (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
1194             &nic->msix_entries[irq].handle);
1195         if (ret != 0) {
1196                 res = nic->msix_entries[irq].irq_res;
1197                 bus_release_resource(nic->dev, SYS_RES_IRQ,
1198                             rman_get_rid(res), res);
1199                 nic->msix_entries[irq].irq_res = NULL;
1200                 return (ret);
1201         }
1202
1203         return (0);
1204 }
1205
1206 static int
1207 nicvf_enable_misc_interrupt(struct nicvf *nic)
1208 {
1209
1210         /* Enable mailbox interrupt */
1211         nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
1212
1213         /* Check if VF is able to communicate with PF */
1214         if (!nicvf_check_pf_ready(nic)) {
1215                 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1216                 return (ENXIO);
1217         }
1218
1219         return (0);
1220 }
1221
1222 static void
1223 nicvf_release_net_interrupts(struct nicvf *nic)
1224 {
1225         struct resource *res;
1226         int irq;
1227         int err;
1228
1229         for_each_cq_irq(irq) {
1230                 res = nic->msix_entries[irq].irq_res;
1231                 if (res == NULL)
1232                         continue;
1233                 /* Teardown active interrupts first */
1234                 if (nic->msix_entries[irq].handle != NULL) {
1235                         err = bus_teardown_intr(nic->dev,
1236                             nic->msix_entries[irq].irq_res,
1237                             nic->msix_entries[irq].handle);
1238                         KASSERT(err == 0,
1239                             ("ERROR: Unable to teardown CQ interrupt %d",
1240                             (irq - NICVF_INTR_ID_CQ)));
1241                         if (err != 0)
1242                                 continue;
1243                 }
1244
1245                 /* Release resource */
1246                 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1247                     res);
1248                 nic->msix_entries[irq].irq_res = NULL;
1249         }
1250
1251         for_each_rbdr_irq(irq) {
1252                 res = nic->msix_entries[irq].irq_res;
1253                 if (res == NULL)
1254                         continue;
1255                 /* Teardown active interrupts first */
1256                 if (nic->msix_entries[irq].handle != NULL) {
1257                         err = bus_teardown_intr(nic->dev,
1258                             nic->msix_entries[irq].irq_res,
1259                             nic->msix_entries[irq].handle);
1260                         KASSERT(err == 0,
1261                             ("ERROR: Unable to teardown RDBR interrupt %d",
1262                             (irq - NICVF_INTR_ID_RBDR)));
1263                         if (err != 0)
1264                                 continue;
1265                 }
1266
1267                 /* Release resource */
1268                 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1269                     res);
1270                 nic->msix_entries[irq].irq_res = NULL;
1271         }
1272
1273         irq = NICVF_INTR_ID_QS_ERR;
1274         res = nic->msix_entries[irq].irq_res;
1275         if (res != NULL) {
1276                 /* Teardown active interrupts first */
1277                 if (nic->msix_entries[irq].handle != NULL) {
1278                         err = bus_teardown_intr(nic->dev,
1279                             nic->msix_entries[irq].irq_res,
1280                             nic->msix_entries[irq].handle);
1281                         KASSERT(err == 0,
1282                             ("ERROR: Unable to teardown QS Error interrupt %d",
1283                             irq));
1284                         if (err != 0)
1285                                 return;
1286                 }
1287
1288                 /* Release resource */
1289                 bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
1290                     res);
1291                 nic->msix_entries[irq].irq_res = NULL;
1292         }
1293 }
1294
1295 static int
1296 nicvf_allocate_net_interrupts(struct nicvf *nic)
1297 {
1298         int irq, rid;
1299         int qidx;
1300         int ret = 0;
1301
1302         /* MSI-X must be configured by now */
1303         if (!nic->msix_enabled) {
1304                 device_printf(nic->dev, "Cannot alloacte queue interrups. "
1305                     "MSI-X interrupts disabled.\n");
1306                 return (ENXIO);
1307         }
1308
1309         /* Register CQ interrupts */
1310         for_each_cq_irq(irq) {
1311                 if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
1312                         break;
1313
1314                 qidx = irq - NICVF_INTR_ID_CQ;
1315                 rid = irq + 1;
1316                 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1317                     SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1318                 if (nic->msix_entries[irq].irq_res == NULL) {
1319                         device_printf(nic->dev,
1320                             "Could not allocate CQ interrupt %d for VF%d\n",
1321                             (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
1322                         ret = ENXIO;
1323                         goto error;
1324                 }
1325                 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1326                     (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
1327                     NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
1328                 if (ret != 0) {
1329                         device_printf(nic->dev,
1330                             "Could not setup CQ interrupt %d for VF%d\n",
1331                             (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
1332                         goto error;
1333                 }
1334         }
1335
1336         /* Register RBDR interrupt */
1337         for_each_rbdr_irq(irq) {
1338                 if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
1339                         break;
1340
1341                 rid = irq + 1;
1342                 nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1343                     SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1344                 if (nic->msix_entries[irq].irq_res == NULL) {
1345                         device_printf(nic->dev,
1346                             "Could not allocate RBDR interrupt %d for VF%d\n",
1347                             (irq - NICVF_INTR_ID_RBDR),
1348                             device_get_unit(nic->dev));
1349                         ret = ENXIO;
1350                         goto error;
1351                 }
1352                 ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1353                     (INTR_MPSAFE | INTR_TYPE_NET), NULL,
1354                     nicvf_rbdr_intr_handler, nic,
1355                     &nic->msix_entries[irq].handle);
1356                 if (ret != 0) {
1357                         device_printf(nic->dev,
1358                             "Could not setup RBDR interrupt %d for VF%d\n",
1359                             (irq - NICVF_INTR_ID_RBDR),
1360                             device_get_unit(nic->dev));
1361                         goto error;
1362                 }
1363         }
1364
1365         /* Register QS error interrupt */
1366         irq = NICVF_INTR_ID_QS_ERR;
1367         rid = irq + 1;
1368         nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
1369             SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
1370         if (nic->msix_entries[irq].irq_res == NULL) {
1371                 device_printf(nic->dev,
1372                     "Could not allocate QS Error interrupt for VF%d\n",
1373                     device_get_unit(nic->dev));
1374                 ret = ENXIO;
1375                 goto error;
1376         }
1377         ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
1378             (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
1379             nic, &nic->msix_entries[irq].handle);
1380         if (ret != 0) {
1381                 device_printf(nic->dev,
1382                     "Could not setup QS Error interrupt for VF%d\n",
1383                     device_get_unit(nic->dev));
1384                 goto error;
1385         }
1386
1387         return (0);
1388 error:
1389         nicvf_release_net_interrupts(nic);
1390         return (ret);
1391 }
1392
/*
 * Bring the interface down: notify the PF, quiesce RBDR/Qset-error
 * interrupts, mark the ifnet inactive, free queue resources and
 * disable the hardware Qset.  Ordering matters; caller must hold the
 * core lock.  Always returns 0.
 */
static int
nicvf_stop_locked(struct nicvf *nic)
{
        struct ifnet *ifp;
        int qidx;
        struct queue_set *qs = nic->qs;
        union nic_mbx mbx = {};

        NICVF_CORE_LOCK_ASSERT(nic);
        /* Stop callout. Can block here since holding SX lock */
        callout_drain(&nic->stats_callout);

        ifp = nic->ifp;

        /* Let the PF know this VF is shutting down. */
        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);

        /* Disable RBDR & QS error interrupts */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
                nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
                nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
        }
        nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
        nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

        /* Deactivate network interface */
        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

        /* Free resources */
        nicvf_config_data_transfer(nic, FALSE);

        /* Disable HW Qset */
        nicvf_qset_config(nic, FALSE);

        /* disable mailbox interrupt */
        nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

        return (0);
}
1432
1433 static void
1434 nicvf_update_stats(struct nicvf *nic)
1435 {
1436         int qidx;
1437         struct nicvf_hw_stats *stats = &nic->hw_stats;
1438         struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1439         struct queue_set *qs = nic->qs;
1440
1441 #define GET_RX_STATS(reg) \
1442     nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
1443 #define GET_TX_STATS(reg) \
1444     nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))
1445
1446         stats->rx_bytes = GET_RX_STATS(RX_OCTS);
1447         stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
1448         stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
1449         stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
1450         stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
1451         stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
1452         stats->rx_drop_red = GET_RX_STATS(RX_RED);
1453         stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
1454         stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
1455         stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
1456         stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
1457         stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
1458         stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1459         stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1460
1461         stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
1462         stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
1463         stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
1464         stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
1465         stats->tx_drops = GET_TX_STATS(TX_DROP);
1466
1467         drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1468             stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
1469         drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
1470         drv_stats->tx_drops = stats->tx_drops;
1471
1472         /* Update RQ and SQ stats */
1473         for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1474                 nicvf_update_rq_stats(nic, qidx);
1475         for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1476                 nicvf_update_sq_stats(nic, qidx);
1477 }
1478
1479 static void
1480 nicvf_tick_stats(void *arg)
1481 {
1482         struct nicvf *nic;
1483
1484         nic = (struct nicvf *)arg;
1485
1486         /* Read the statistics */
1487         nicvf_update_stats(nic);
1488
1489         callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
1490 }