1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "ifdi_if.h"
41
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44
45 /************************************************************************
46  * Driver version
47  ************************************************************************/
48 char ixv_driver_version[] = "2.0.1-k";
49
50 /************************************************************************
51  * PCI Device ID Table
52  *
53  *   Used by probe to select devices to load on
54  *   Last field stores the device description string
55  *   Last entry must be all 0s
56  *
57  *   { Vendor ID, Device ID, String }
58  ************************************************************************/
59 static pci_vendor_info_t ixv_vendor_info_array[] =
60 {
61         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
62         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
63         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
64         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
65         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
66         /* required last entry */
67         PVID_END
68 };
69
70 /************************************************************************
71  * Function prototypes
72  ************************************************************************/
73 static void     *ixv_register(device_t dev);
74 static int      ixv_if_attach_pre(if_ctx_t ctx);
75 static int      ixv_if_attach_post(if_ctx_t ctx);
76 static int      ixv_if_detach(if_ctx_t ctx);
77
78 static int      ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
79 static int      ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
80 static int      ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
81 static void     ixv_if_queues_free(if_ctx_t ctx);
82 static void     ixv_identify_hardware(if_ctx_t ctx);
83 static void     ixv_init_device_features(struct adapter *);
84 static int      ixv_allocate_pci_resources(if_ctx_t ctx);
85 static void     ixv_free_pci_resources(if_ctx_t ctx);
86 static int      ixv_setup_interface(if_ctx_t ctx);
87 static void     ixv_if_media_status(if_ctx_t, struct ifmediareq *);
88 static int      ixv_if_media_change(if_ctx_t ctx);
89 static void     ixv_if_update_admin_status(if_ctx_t ctx);
90 static int      ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
91
92 static int      ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
93 static void     ixv_if_init(if_ctx_t ctx);
94 static void     ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
95 static void     ixv_if_stop(if_ctx_t ctx);
96 static int      ixv_negotiate_api(struct adapter *);
97
98 static void     ixv_initialize_transmit_units(if_ctx_t ctx);
99 static void     ixv_initialize_receive_units(if_ctx_t ctx);
100 static void     ixv_initialize_rss_mapping(struct adapter *);
101
102 static void     ixv_setup_vlan_support(if_ctx_t ctx);
103 static void     ixv_configure_ivars(struct adapter *);
104 static void     ixv_if_enable_intr(if_ctx_t ctx);
105 static void     ixv_if_disable_intr(if_ctx_t ctx);
106 static void     ixv_if_multi_set(if_ctx_t ctx);
107
108 static void     ixv_if_register_vlan(if_ctx_t, u16);
109 static void     ixv_if_unregister_vlan(if_ctx_t, u16);
110
111 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
112
113 static void     ixv_save_stats(struct adapter *);
114 static void     ixv_init_stats(struct adapter *);
115 static void     ixv_update_stats(struct adapter *);
116 static void     ixv_add_stats_sysctls(struct adapter *adapter);
117
118 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
119 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
120
121 static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
122
123 /* The MSI-X Interrupt handlers */
124 static int      ixv_msix_que(void *);
125 static int      ixv_msix_mbx(void *);
126
127 /************************************************************************
128  * FreeBSD Device Interface Entry Points
129  ************************************************************************/
130 static device_method_t ixv_methods[] = {
131         /* Device interface */
132         DEVMETHOD(device_register, ixv_register),
133         DEVMETHOD(device_probe, iflib_device_probe),
134         DEVMETHOD(device_attach, iflib_device_attach),
135         DEVMETHOD(device_detach, iflib_device_detach),
136         DEVMETHOD(device_shutdown, iflib_device_shutdown),
137         DEVMETHOD_END
138 };
139
140 static driver_t ixv_driver = {
141         "ixv", ixv_methods, sizeof(struct adapter),
142 };
143
144 devclass_t ixv_devclass;
145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
146 IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
147 MODULE_DEPEND(ixv, pci, 1, 1, 1);
148 MODULE_DEPEND(ixv, ether, 1, 1, 1);
149 #ifdef DEV_NETMAP
150 MODULE_DEPEND(ixv, netmap, 1, 1, 1);
151 #endif /* DEV_NETMAP */
152
153 static device_method_t ixv_if_methods[] = {
154         DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
155         DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
156         DEVMETHOD(ifdi_detach, ixv_if_detach),
157         DEVMETHOD(ifdi_init, ixv_if_init),
158         DEVMETHOD(ifdi_stop, ixv_if_stop),
159         DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
160         DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
161         DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
162         DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
163         DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
164         DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
165         DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
166         DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
167         DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
168         DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
169         DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
170         DEVMETHOD(ifdi_media_status, ixv_if_media_status),
171         DEVMETHOD(ifdi_media_change, ixv_if_media_change),
172         DEVMETHOD(ifdi_timer, ixv_if_local_timer),
173         DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
174         DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
175         DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
176         DEVMETHOD_END
177 };
178
179 static driver_t ixv_if_driver = {
180   "ixv_if", ixv_if_methods, sizeof(struct adapter)
181 };
182
183 /*
184  * TUNEABLE PARAMETERS:
185  */
186
187 /* Flow control setting, default to full */
188 static int ixv_flow_control = ixgbe_fc_full;
189 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
190
191 /*
192  * Header split: this causes the hardware to DMA
193  * the header into a separate mbuf from the payload.
194  * It can be a performance win in some workloads, but
195  * in others it actually hurts; it's off by default.
196  */
197 static int ixv_header_split = FALSE;
198 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
199
200 /*
201  * Shadow VFTA table: this is needed because
202  * the real filter table gets cleared during
203  * a soft reset, and we need to repopulate it.
204  */
205 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
206 extern struct if_txrx ixgbe_txrx;
207
208 static struct if_shared_ctx ixv_sctx_init = {
209         .isc_magic = IFLIB_MAGIC,
210         .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
211         .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
212         .isc_tx_maxsegsize = PAGE_SIZE,
213         .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
214         .isc_tso_maxsegsize = PAGE_SIZE,
215         .isc_rx_maxsize = MJUM16BYTES,
216         .isc_rx_nsegments = 1,
217         .isc_rx_maxsegsize = MJUM16BYTES,
218         .isc_nfl = 1,
219         .isc_ntxqs = 1,
220         .isc_nrxqs = 1,
221         .isc_admin_intrcnt = 1,
222         .isc_vendor_info = ixv_vendor_info_array,
223         .isc_driver_version = ixv_driver_version,
224         .isc_driver = &ixv_if_driver,
225
226         .isc_nrxd_min = {MIN_RXD},
227         .isc_ntxd_min = {MIN_TXD},
228         .isc_nrxd_max = {MAX_RXD},
229         .isc_ntxd_max = {MAX_TXD},
230         .isc_nrxd_default = {DEFAULT_RXD},
231         .isc_ntxd_default = {DEFAULT_TXD},
232 };
233
234 if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
235
236 static void *
237 ixv_register(device_t dev)
238 {
239         return (ixv_sctx);
240 }
241
242 /************************************************************************
243  * ixv_if_tx_queues_alloc
244  ************************************************************************/
245 static int
246 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
247                        int ntxqs, int ntxqsets)
248 {
249         struct adapter     *adapter = iflib_get_softc(ctx);
250         if_softc_ctx_t     scctx = adapter->shared;
251         struct ix_tx_queue *que;
252         int                i, j, error;
253
254         MPASS(adapter->num_tx_queues == ntxqsets);
255         MPASS(ntxqs == 1);
256
257         /* Allocate queue structure memory */
258         adapter->tx_queues =
259             (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
260                                          M_DEVBUF, M_NOWAIT | M_ZERO);
261         if (!adapter->tx_queues) {
262                 device_printf(iflib_get_dev(ctx),
263                     "Unable to allocate TX ring memory\n");
264                 return (ENOMEM);
265         }
266
267         for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
268                 struct tx_ring *txr = &que->txr;
269
270                 txr->me = i;
271                 txr->adapter =  que->adapter = adapter;
272                 adapter->active_queues |= (u64)1 << txr->me;
273
274                 /* Allocate report status array */
275                 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
276                         error = ENOMEM;
277                         goto fail;
278                 }
279                 for (j = 0; j < scctx->isc_ntxd[0]; j++)
280                         txr->tx_rsq[j] = QIDX_INVALID;
281                 /* get the virtual and physical address of the hardware queues */
282                 txr->tail = IXGBE_VFTDT(txr->me);
283                 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
284                 txr->tx_paddr = paddrs[i*ntxqs];
285
286                 txr->bytes = 0;
287                 txr->total_packets = 0;
288
289         }
290
291         device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
292             adapter->num_tx_queues);
293
294         return (0);
295
296  fail:
297         ixv_if_queues_free(ctx);
298
299         return (error);
300 } /* ixv_if_tx_queues_alloc */
301
302 /************************************************************************
303  * ixv_if_rx_queues_alloc
304  ************************************************************************/
305 static int
306 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
307                        int nrxqs, int nrxqsets)
308 {
309         struct adapter     *adapter = iflib_get_softc(ctx);
310         struct ix_rx_queue *que;
311         int                i, error;
312
313         MPASS(adapter->num_rx_queues == nrxqsets);
314         MPASS(nrxqs == 1);
315
316         /* Allocate queue structure memory */
317         adapter->rx_queues =
318             (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
319                                          M_DEVBUF, M_NOWAIT | M_ZERO);
320         if (!adapter->rx_queues) {
321                 device_printf(iflib_get_dev(ctx),
322                     "Unable to allocate TX ring memory\n");
323                 error = ENOMEM;
324                 goto fail;
325         }
326
327         for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
328                 struct rx_ring *rxr = &que->rxr;
329                 rxr->me = i;
330                 rxr->adapter = que->adapter = adapter;
331
332
333                 /* get the virtual and physical address of the hw queues */
334                 rxr->tail = IXGBE_VFRDT(rxr->me);
335                 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
336                 rxr->rx_paddr = paddrs[i*nrxqs];
337                 rxr->bytes = 0;
338                 rxr->que = que;
339         }
340
341         device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
342             adapter->num_rx_queues);
343
344         return (0);
345
346 fail:
347         ixv_if_queues_free(ctx);
348
349         return (error);
350 } /* ixv_if_rx_queues_alloc */
351
352 /************************************************************************
353  * ixv_if_queues_free
354  ************************************************************************/
355 static void
356 ixv_if_queues_free(if_ctx_t ctx)
357 {
358         struct adapter     *adapter = iflib_get_softc(ctx);
359         struct ix_tx_queue *que = adapter->tx_queues;
360         int                i;
361
362         if (que == NULL)
363                 goto free;
364
365         for (i = 0; i < adapter->num_tx_queues; i++, que++) {
366                 struct tx_ring *txr = &que->txr;
367                 if (txr->tx_rsq == NULL)
368                         break;
369
370                 free(txr->tx_rsq, M_DEVBUF);
371                 txr->tx_rsq = NULL;
372         }
373         if (adapter->tx_queues != NULL)
374                 free(adapter->tx_queues, M_DEVBUF);
375 free:
376         if (adapter->rx_queues != NULL)
377                 free(adapter->rx_queues, M_DEVBUF);
378         adapter->tx_queues = NULL;
379         adapter->rx_queues = NULL;
380 } /* ixv_if_queues_free */
381
382 /************************************************************************
383  * ixv_if_attach_pre - Device initialization routine
384  *
385  *   Called when the driver is being loaded.
386  *   Identifies the type of hardware, allocates all resources
387  *   and initializes the hardware.
388  *
389  *   return 0 on success, positive on failure
390  ************************************************************************/
391 static int
392 ixv_if_attach_pre(if_ctx_t ctx)
393 {
394         struct adapter  *adapter;
395         device_t        dev;
396         if_softc_ctx_t  scctx;
397         struct ixgbe_hw *hw;
398         int             error = 0;
399
400         INIT_DEBUGOUT("ixv_attach: begin");
401
402         /* Allocate, clear, and link in our adapter structure */
403         dev = iflib_get_dev(ctx);
404         adapter = iflib_get_softc(ctx);
405         adapter->dev = dev;
406         adapter->ctx = ctx;
407         adapter->hw.back = adapter;
408         scctx = adapter->shared = iflib_get_softc_ctx(ctx);
409         adapter->media = iflib_get_media(ctx);
410         hw = &adapter->hw;
411
412         /* Do base PCI setup - map BAR0 */
413         if (ixv_allocate_pci_resources(ctx)) {
414                 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
415                 error = ENXIO;
416                 goto err_out;
417         }
418
419         /* SYSCTL APIs */
420         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
421             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
422             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
423             "Debug Info");
424
425         /* Determine hardware revision */
426         ixv_identify_hardware(ctx);
427         ixv_init_device_features(adapter);
428
429         /* Initialize the shared code */
430         error = ixgbe_init_ops_vf(hw);
431         if (error) {
432                 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
433                 error = EIO;
434                 goto err_out;
435         }
436
437         /* Setup the mailbox */
438         ixgbe_init_mbx_params_vf(hw);
439
440         error = hw->mac.ops.reset_hw(hw);
441         if (error == IXGBE_ERR_RESET_FAILED)
442                 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
443         else if (error)
444                 device_printf(dev, "...reset_hw() failed with error %d\n",
445                     error);
446         if (error) {
447                 error = EIO;
448                 goto err_out;
449         }
450
451         error = hw->mac.ops.init_hw(hw);
452         if (error) {
453                 device_printf(dev, "...init_hw() failed with error %d\n",
454                     error);
455                 error = EIO;
456                 goto err_out;
457         }
458
459         /* Negotiate mailbox API version */
460         error = ixv_negotiate_api(adapter);
461         if (error) {
462                 device_printf(dev,
463                     "Mailbox API negotiation failed during attach!\n");
464                 goto err_out;
465         }
466
467         /* If no mac address was assigned, make a random one */
468         if (!ixv_check_ether_addr(hw->mac.addr)) {
469                 u8 addr[ETHER_ADDR_LEN];
470                 arc4rand(&addr, sizeof(addr), 0);
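                /* Clear the multicast bit and set the locally-administered bit */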
471                 addr[0] &= 0xFE;
472                 addr[0] |= 0x02;
473                 bcopy(addr, hw->mac.addr, sizeof(addr));
474                 bcopy(addr, hw->mac.perm_addr, sizeof(addr));
475         }
476
477         /* Most of the iflib initialization... */
478
479         iflib_set_mac(ctx, hw->mac.addr);
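        /*
         * X550-class VFs get two queue pairs in this driver;
         * 82599 and X540 VFs are limited to a single queue pair.
         */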
480         switch (adapter->hw.mac.type) {
481         case ixgbe_mac_X550_vf:
482         case ixgbe_mac_X550EM_x_vf:
483         case ixgbe_mac_X550EM_a_vf:
484                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
485                 break;
486         default:
487                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
488         }
489         scctx->isc_txqsizes[0] =
490             roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
491             sizeof(u32), DBA_ALIGN);
492         scctx->isc_rxqsizes[0] =
493             roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
494             DBA_ALIGN);
495         /* XXX */
496         scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
497             CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
498         scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
499         scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
500         scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
501         scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
502         scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
503
504         scctx->isc_txrx = &ixgbe_txrx;
505
506         /*
507          * Tell the upper layer(s) we support everything the PF
508          * driver does except...
509          *   Wake-on-LAN
510          */
511         scctx->isc_capabilities = IXGBE_CAPS;
512         scctx->isc_capabilities ^= IFCAP_WOL;
513         scctx->isc_capenable = scctx->isc_capabilities;
514
515         INIT_DEBUGOUT("ixv_if_attach_pre: end");
516
517         return (0);
518
519 err_out:
520         ixv_free_pci_resources(ctx);
521
522         return (error);
523 } /* ixv_if_attach_pre */
524
525 static int
526 ixv_if_attach_post(if_ctx_t ctx)
527 {
528         struct adapter *adapter = iflib_get_softc(ctx);
529         device_t       dev = iflib_get_dev(ctx);
530         int            error = 0;
531
532         /* Setup OS specific network interface */
533         error = ixv_setup_interface(ctx);
534         if (error) {
535                 device_printf(dev, "Interface setup failed: %d\n", error);
536                 goto end;
537         }
538
539         /* Do the stats setup */
540         ixv_save_stats(adapter);
541         ixv_init_stats(adapter);
542         ixv_add_stats_sysctls(adapter);
543
544 end:
545         return error;
546 } /* ixv_if_attach_post */
547
548 /************************************************************************
549  * ixv_if_detach - Device removal routine
550  *
551  *   Called when the driver is being removed.
552  *   Stops the adapter and deallocates all the resources
553  *   that were allocated for driver operation.
554  *
555  *   return 0 on success, positive on failure
556  ************************************************************************/
557 static int
558 ixv_if_detach(if_ctx_t ctx)
559 {
560         INIT_DEBUGOUT("ixv_detach: begin");
561
562         ixv_free_pci_resources(ctx);
563
564         return (0);
565 } /* ixv_if_detach */
566
567 /************************************************************************
568  * ixv_if_mtu_set
569  ************************************************************************/
570 static int
571 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
572 {
573         struct adapter *adapter = iflib_get_softc(ctx);
574         struct ifnet   *ifp = iflib_get_ifp(ctx);
575         int            error = 0;
576
577         IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
578         if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
579                 error = EINVAL;
580         } else {
581                 ifp->if_mtu = mtu;
582                 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
583         }
584
585         return error;
586 } /* ixv_if_mtu_set */
587
588 /************************************************************************
589  * ixv_if_init - Init entry point
590  *
591  *   Used in two ways: It is used by the stack as an init entry
592  *   point in network interface structure. It is also used
593  *   by the driver as a hw/sw initialization routine to get
594  *   to a consistent state.
595  *
596  *   return 0 on success, positive on failure
597  ************************************************************************/
598 static void
599 ixv_if_init(if_ctx_t ctx)
600 {
601         struct adapter  *adapter = iflib_get_softc(ctx);
602         struct ifnet    *ifp = iflib_get_ifp(ctx);
603         device_t        dev = iflib_get_dev(ctx);
604         struct ixgbe_hw *hw = &adapter->hw;
605         int             error = 0;
606
607         INIT_DEBUGOUT("ixv_if_init: begin");
608         hw->adapter_stopped = FALSE;
609         hw->mac.ops.stop_adapter(hw);
610
611         /* reprogram the RAR[0] in case user changed it. */
612         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
613
614         /* Get the latest MAC address; the user can use an LAA */
615         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
616         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
617
618         /* Reset VF and renegotiate mailbox API version */
619         hw->mac.ops.reset_hw(hw);
620         hw->mac.ops.start_hw(hw);
621         error = ixv_negotiate_api(adapter);
622         if (error) {
623                 device_printf(dev,
624                     "Mailbox API negotiation failed in if_init!\n");
625                 return;
626         }
627
628         ixv_initialize_transmit_units(ctx);
629
630         /* Setup Multicast table */
631         ixv_if_multi_set(ctx);
632
633         /*
634          * Determine the correct mbuf pool
635          * for doing jumbo/headersplit
636          */
637         if (ifp->if_mtu > ETHERMTU)
638                 adapter->rx_mbuf_sz = MJUMPAGESIZE;
639         else
640                 adapter->rx_mbuf_sz = MCLBYTES;
641
642         /* Configure RX settings */
643         ixv_initialize_receive_units(ctx);
644
645         /* Set up VLAN offload and filter */
646         ixv_setup_vlan_support(ctx);
647
648         /* Set up MSI-X routing */
649         ixv_configure_ivars(adapter);
650
651         /* Set up auto-mask */
652         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
653
654         /* Set moderation on the Link interrupt */
655         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
656
657         /* Stats init */
658         ixv_init_stats(adapter);
659
660         /* Config/Enable Link */
661         hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
662             FALSE);
663
664         /* And now turn on interrupts */
665         ixv_if_enable_intr(ctx);
666
667         return;
668 } /* ixv_if_init */
669
670 /************************************************************************
671  * ixv_enable_queue
672  ************************************************************************/
673 static inline void
674 ixv_enable_queue(struct adapter *adapter, u32 vector)
675 {
676         struct ixgbe_hw *hw = &adapter->hw;
677         u32             queue = 1 << vector;
678         u32             mask;
679
680         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
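        /* Writing a 1 to a VTEIMS bit unmasks (enables) that vector's interrupt */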
681         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
682 } /* ixv_enable_queue */
683
684 /************************************************************************
685  * ixv_disable_queue
686  ************************************************************************/
687 static inline void
688 ixv_disable_queue(struct adapter *adapter, u32 vector)
689 {
690         struct ixgbe_hw *hw = &adapter->hw;
691         u64             queue = (u64)1 << vector;
692         u32             mask;
693
694         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
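        /* Writing a 1 to a VTEIMC bit masks (disables) that vector's interrupt */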
695         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
696 } /* ixv_disable_queue */
697
698
699 /************************************************************************
700  * ixv_msix_que - MSI-X Queue Interrupt Service routine
701  ************************************************************************/
702 static int
703 ixv_msix_que(void *arg)
704 {
705         struct ix_rx_queue *que = arg;
706         struct adapter     *adapter = que->adapter;
707
708         ixv_disable_queue(adapter, que->msix);
709         ++que->irqs;
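        /*
         * Leave the queue vector masked and let the iflib queue task do the
         * actual RX/TX processing; iflib re-enables the interrupt afterwards
         * through ifdi_rx_queue_intr_enable (ixv_if_rx_queue_intr_enable).
         */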
710
711         return (FILTER_SCHEDULE_THREAD);
712 } /* ixv_msix_que */
713
714 /************************************************************************
715  * ixv_msix_mbx
716  ************************************************************************/
717 static int
718 ixv_msix_mbx(void *arg)
719 {
720         struct adapter  *adapter = arg;
721         struct ixgbe_hw *hw = &adapter->hw;
722         u32             reg;
723
724         ++adapter->link_irq;
725
726         /* First get the cause */
727         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
728         /* Clear interrupt with write */
729         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
730
731         /* Link status change */
732         if (reg & IXGBE_EICR_LSC)
733                 iflib_admin_intr_deferred(adapter->ctx);
734
735         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
736
737         return (FILTER_HANDLED);
738 } /* ixv_msix_mbx */
739
740 /************************************************************************
741  * ixv_if_media_status - Media Ioctl callback
742  *
743  *   Called whenever the user queries the status of
744  *   the interface using ifconfig.
745  ************************************************************************/
746 static void
747 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
748 {
749         struct adapter *adapter = iflib_get_softc(ctx);
750
751         INIT_DEBUGOUT("ixv_media_status: begin");
752
753         iflib_admin_intr_deferred(ctx);
754
755         ifmr->ifm_status = IFM_AVALID;
756         ifmr->ifm_active = IFM_ETHER;
757
758         if (!adapter->link_active)
759                 return;
760
761         ifmr->ifm_status |= IFM_ACTIVE;
762
763         switch (adapter->link_speed) {
764                 case IXGBE_LINK_SPEED_1GB_FULL:
765                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
766                         break;
767                 case IXGBE_LINK_SPEED_10GB_FULL:
768                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
769                         break;
770                 case IXGBE_LINK_SPEED_100_FULL:
771                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
772                         break;
773                 case IXGBE_LINK_SPEED_10_FULL:
774                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
775                         break;
776         }
777 } /* ixv_if_media_status */
778
779 /************************************************************************
780  * ixv_if_media_change - Media Ioctl callback
781  *
782  *   Called when the user changes speed/duplex using
783  *   media/mediaopt option with ifconfig.
784  ************************************************************************/
785 static int
786 ixv_if_media_change(if_ctx_t ctx)
787 {
788         struct adapter *adapter = iflib_get_softc(ctx);
789         struct ifmedia *ifm = iflib_get_media(ctx);
790
791         INIT_DEBUGOUT("ixv_media_change: begin");
792
793         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
794                 return (EINVAL);
795
796         switch (IFM_SUBTYPE(ifm->ifm_media)) {
797         case IFM_AUTO:
798                 break;
799         default:
800                 device_printf(adapter->dev, "Only auto media type\n");
801                 return (EINVAL);
802         }
803
804         return (0);
805 } /* ixv_if_media_change */
806
807
808 /************************************************************************
809  * ixv_negotiate_api
810  *
811  *   Negotiate the Mailbox API with the PF;
812  *   start with the most featured API first.
813  ************************************************************************/
814 static int
815 ixv_negotiate_api(struct adapter *adapter)
816 {
817         struct ixgbe_hw *hw = &adapter->hw;
818         int             mbx_api[] = { ixgbe_mbox_api_11,
819                                       ixgbe_mbox_api_10,
820                                       ixgbe_mbox_api_unknown };
821         int             i = 0;
822
823         while (mbx_api[i] != ixgbe_mbox_api_unknown) {
824                 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
825                         return (0);
826                 i++;
827         }
828
829         return (EINVAL);
830 } /* ixv_negotiate_api */
831
832
833 /************************************************************************
834  * ixv_if_multi_set - Multicast Update
835  *
836  *   Called whenever multicast address list is updated.
837  ************************************************************************/
838 static void
839 ixv_if_multi_set(if_ctx_t ctx)
840 {
841         u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
842         struct adapter     *adapter = iflib_get_softc(ctx);
843         u8                 *update_ptr;
844         struct ifmultiaddr *ifma;
845         if_t               ifp = iflib_get_ifp(ctx);
846         int                mcnt = 0;
847
848         IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
849
850         CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
851                 if (ifma->ifma_addr->sa_family != AF_LINK)
852                         continue;
                /* Don't overflow the mta[] scratch buffer */
                if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
                        break;
853                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
854                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
855                     IXGBE_ETH_LENGTH_OF_ADDRESS);
856                 mcnt++;
857         }
858
859         update_ptr = mta;
860
861         adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
862             ixv_mc_array_itr, TRUE);
863 } /* ixv_if_multi_set */
864
865 /************************************************************************
866  * ixv_mc_array_itr
867  *
868  *   An iterator function needed by the multicast shared code.
869  *   It feeds the shared code routine the addresses in the
870  *   array of ixv_if_multi_set() one by one.
871  ************************************************************************/
872 static u8 *
873 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
874 {
875         u8 *addr = *update_ptr;
876         u8 *newptr;
877
878         *vmdq = 0;
879
880         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
881         *update_ptr = newptr;
882
883         return addr;
884 } /* ixv_mc_array_itr */
885
886 /************************************************************************
887  * ixv_if_local_timer - Timer routine
888  *
889  *   Checks for link status, updates statistics,
890  *   and runs the watchdog check.
891  ************************************************************************/
892 static void
893 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
894 {
895         if (qid != 0)
896                 return;
897
898         /* Fire off the adminq task */
899         iflib_admin_intr_deferred(ctx);
900 } /* ixv_if_local_timer */
901
902 /************************************************************************
903  * ixv_if_update_admin_status - Update OS on link state
904  *
905  * Note: Only updates the OS on the cached link state.
906  *       The real check of the hardware only happens with
907  *       a link interrupt.
908  ************************************************************************/
909 static void
910 ixv_if_update_admin_status(if_ctx_t ctx)
911 {
912         struct adapter *adapter = iflib_get_softc(ctx);
913         device_t       dev = iflib_get_dev(ctx);
914         s32            status;
915
916         adapter->hw.mac.get_link_status = TRUE;
917
918         status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
919             &adapter->link_up, FALSE);
920
921         if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
922                 /* Mailbox's Clear To Send status is lost or timeout occurred.
923                  * We need reinitialization. */
924                 iflib_get_ifp(ctx)->if_init(ctx);
925         }
926
927         if (adapter->link_up) {
928                 if (adapter->link_active == FALSE) {
929                         if (bootverbose)
930                                 device_printf(dev, "Link is up %d Gbps %s \n",
931                                     ((adapter->link_speed == 128) ? 10 : 1),
932                                     "Full Duplex");
933                         adapter->link_active = TRUE;
934                         iflib_link_state_change(ctx, LINK_STATE_UP,
935                             IF_Gbps(10));
936                 }
937         } else { /* Link down */
938                 if (adapter->link_active == TRUE) {
939                         if (bootverbose)
940                                 device_printf(dev, "Link is Down\n");
941                         iflib_link_state_change(ctx, LINK_STATE_DOWN,  0);
942                         adapter->link_active = FALSE;
943                 }
944         }
945
946         /* Stats Update */
947         ixv_update_stats(adapter);
948 } /* ixv_if_update_admin_status */
949
950
951 /************************************************************************
952  * ixv_if_stop - Stop the hardware
953  *
954  *   Disables all traffic on the adapter by issuing a
955  *   global reset on the MAC and deallocates TX/RX buffers.
956  ************************************************************************/
957 static void
958 ixv_if_stop(if_ctx_t ctx)
959 {
960         struct adapter  *adapter = iflib_get_softc(ctx);
961         struct ixgbe_hw *hw = &adapter->hw;
962
963         INIT_DEBUGOUT("ixv_stop: begin\n");
964
965         ixv_if_disable_intr(ctx);
966
967         hw->mac.ops.reset_hw(hw);
968         adapter->hw.adapter_stopped = FALSE;
969         hw->mac.ops.stop_adapter(hw);
970
971         /* Update the stack */
972         adapter->link_up = FALSE;
973         ixv_if_update_admin_status(ctx);
974
975         /* reprogram the RAR[0] in case user changed it. */
976         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
977 } /* ixv_if_stop */
978
979
980 /************************************************************************
981  * ixv_identify_hardware - Determine hardware revision.
982  ************************************************************************/
983 static void
984 ixv_identify_hardware(if_ctx_t ctx)
985 {
986         struct adapter  *adapter = iflib_get_softc(ctx);
987         device_t        dev = iflib_get_dev(ctx);
988         struct ixgbe_hw *hw = &adapter->hw;
989
990         /* Save off the information about this board */
991         hw->vendor_id = pci_get_vendor(dev);
992         hw->device_id = pci_get_device(dev);
993         hw->revision_id = pci_get_revid(dev);
994         hw->subsystem_vendor_id = pci_get_subvendor(dev);
995         hw->subsystem_device_id = pci_get_subdevice(dev);
996
997         /* A subset of set_mac_type */
998         switch (hw->device_id) {
999         case IXGBE_DEV_ID_82599_VF:
1000                 hw->mac.type = ixgbe_mac_82599_vf;
1001                 break;
1002         case IXGBE_DEV_ID_X540_VF:
1003                 hw->mac.type = ixgbe_mac_X540_vf;
1004                 break;
1005         case IXGBE_DEV_ID_X550_VF:
1006                 hw->mac.type = ixgbe_mac_X550_vf;
1007                 break;
1008         case IXGBE_DEV_ID_X550EM_X_VF:
1009                 hw->mac.type = ixgbe_mac_X550EM_x_vf;
1010                 break;
1011         case IXGBE_DEV_ID_X550EM_A_VF:
1012                 hw->mac.type = ixgbe_mac_X550EM_a_vf;
1013                 break;
1014         default:
1015                 device_printf(dev, "unknown mac type\n");
1016                 hw->mac.type = ixgbe_mac_unknown;
1017                 break;
1018         }
1019 } /* ixv_identify_hardware */
1020
1021 /************************************************************************
1022  * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1023  ************************************************************************/
1024 static int
1025 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1026 {
1027         struct adapter     *adapter = iflib_get_softc(ctx);
1028         device_t           dev = iflib_get_dev(ctx);
1029         struct ix_rx_queue *rx_que = adapter->rx_queues;
1030         struct ix_tx_queue *tx_que;
1031         int                error, rid, vector = 0;
1032         char               buf[16];
1033
1034         for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1035                 rid = vector + 1;
1036
1037                 snprintf(buf, sizeof(buf), "rxq%d", i);
1038                 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1039                     IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1040
1041                 if (error) {
1042                         device_printf(iflib_get_dev(ctx),
1043                             "Failed to allocate que int %d err: %d", i, error);
1044                         adapter->num_rx_queues = i + 1;
1045                         goto fail;
1046                 }
1047
1048                 rx_que->msix = vector;
1049                 adapter->active_queues |= (u64)(1 << rx_que->msix);
1050
1051         }
1052
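        /*
         * TX queues do not get their own MSI-X vectors: each TX queue is
         * serviced as a softirq on its paired RX queue's interrupt.
         */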
1053         for (int i = 0; i < adapter->num_tx_queues; i++) {
1054                 snprintf(buf, sizeof(buf), "txq%d", i);
1055                 tx_que = &adapter->tx_queues[i];
1056                 tx_que->msix = i % adapter->num_rx_queues;
1057                 iflib_softirq_alloc_generic(ctx,
1058                     &adapter->rx_queues[tx_que->msix].que_irq,
1059                     IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1060         }
1061         rid = vector + 1;
1062         error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
1063             IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
1064         if (error) {
1065                 device_printf(iflib_get_dev(ctx),
1066                     "Failed to register admin handler");
1067                 return (error);
1068         }
1069
1070         adapter->vector = vector;
1071         /*
1072          * Due to a broken design QEMU will fail to properly
1073          * enable the guest for MSIX unless the vectors in
1074          * the table are all set up, so we must rewrite the
1075          * ENABLE in the MSIX control register again at this
1076          * point to cause it to successfully initialize us.
1077          */
1078         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1079                 int msix_ctrl;
1080                 pci_find_cap(dev, PCIY_MSIX, &rid);
1081                 rid += PCIR_MSIX_CTRL;
1082                 msix_ctrl = pci_read_config(dev, rid, 2);
1083                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1084                 pci_write_config(dev, rid, msix_ctrl, 2);
1085         }
1086
1087         return (0);
1088
1089 fail:
1090         iflib_irq_free(ctx, &adapter->irq);
1091         rx_que = adapter->rx_queues;
1092         for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
1093                 iflib_irq_free(ctx, &rx_que->que_irq);
1094
1095         return (error);
1096 } /* ixv_if_msix_intr_assign */
1097
1098 /************************************************************************
1099  * ixv_allocate_pci_resources
1100  ************************************************************************/
1101 static int
1102 ixv_allocate_pci_resources(if_ctx_t ctx)
1103 {
1104         struct adapter *adapter = iflib_get_softc(ctx);
1105         device_t       dev = iflib_get_dev(ctx);
1106         int            rid;
1107
1108         rid = PCIR_BAR(0);
1109         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1110             RF_ACTIVE);
1111
1112         if (!(adapter->pci_mem)) {
1113                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1114                 return (ENXIO);
1115         }
1116
1117         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1118         adapter->osdep.mem_bus_space_handle =
1119             rman_get_bushandle(adapter->pci_mem);
1120         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1121
1122         return (0);
1123 } /* ixv_allocate_pci_resources */
1124
1125 /************************************************************************
1126  * ixv_free_pci_resources
1127  ************************************************************************/
1128 static void
1129 ixv_free_pci_resources(if_ctx_t ctx)
1130 {
1131         struct adapter     *adapter = iflib_get_softc(ctx);
1132         struct ix_rx_queue *que = adapter->rx_queues;
1133         device_t           dev = iflib_get_dev(ctx);
1134
1135         /* Release all MSI-X queue resources */
1136         if (adapter->intr_type == IFLIB_INTR_MSIX)
1137                 iflib_irq_free(ctx, &adapter->irq);
1138
1139         if (que != NULL) {
1140                 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1141                         iflib_irq_free(ctx, &que->que_irq);
1142                 }
1143         }
1144
1145         if (adapter->pci_mem != NULL)
1146                 bus_release_resource(dev, SYS_RES_MEMORY,
1147                     rman_get_rid(adapter->pci_mem), adapter->pci_mem);
1148 } /* ixv_free_pci_resources */
1149
1150 /************************************************************************
1151  * ixv_setup_interface
1152  *
1153  *   Setup networking device structure and register an interface.
1154  ************************************************************************/
1155 static int
1156 ixv_setup_interface(if_ctx_t ctx)
1157 {
1158         struct adapter *adapter = iflib_get_softc(ctx);
1159         if_softc_ctx_t scctx = adapter->shared;
1160         struct ifnet   *ifp = iflib_get_ifp(ctx);
1161
1162         INIT_DEBUGOUT("ixv_setup_interface: begin");
1163
1164         if_setbaudrate(ifp, IF_Gbps(10));
1165         ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1166
1167
1168         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1169         ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1170         ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1171
1172         return 0;
1173 } /* ixv_setup_interface */
1174
1175 /************************************************************************
1176  * ixv_if_get_counter
1177  ************************************************************************/
1178 static uint64_t
1179 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1180 {
1181         struct adapter *adapter = iflib_get_softc(ctx);
1182         if_t           ifp = iflib_get_ifp(ctx);
1183
1184         switch (cnt) {
1185         case IFCOUNTER_IPACKETS:
1186                 return (adapter->ipackets);
1187         case IFCOUNTER_OPACKETS:
1188                 return (adapter->opackets);
1189         case IFCOUNTER_IBYTES:
1190                 return (adapter->ibytes);
1191         case IFCOUNTER_OBYTES:
1192                 return (adapter->obytes);
1193         case IFCOUNTER_IMCASTS:
1194                 return (adapter->imcasts);
1195         default:
1196                 return (if_get_counter_default(ifp, cnt));
1197         }
1198 } /* ixv_if_get_counter */
1199
1200 /************************************************************************
1201  * ixv_initialize_transmit_units - Enable transmit unit.
1202  ************************************************************************/
1203 static void
1204 ixv_initialize_transmit_units(if_ctx_t ctx)
1205 {
1206         struct adapter     *adapter = iflib_get_softc(ctx);
1207         struct ixgbe_hw    *hw = &adapter->hw;
1208         if_softc_ctx_t     scctx = adapter->shared;
1209         struct ix_tx_queue *que = adapter->tx_queues;
1210         int                i;
1211
1212         for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1213                 struct tx_ring *txr = &que->txr;
1214                 u64            tdba = txr->tx_paddr;
1215                 u32            txctrl, txdctl;
1216                 int            j = txr->me;
1217
1218                 /* Set WTHRESH to 8, burst writeback */
1219                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1220                 txdctl |= (8 << 16);
1221                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1222
1223                 /* Set the HW Tx Head and Tail indices */
1224                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1225                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1226
1227                 /* Set Tx Tail register */
1228                 txr->tail = IXGBE_VFTDT(j);
1229
1230                 txr->tx_rs_cidx = txr->tx_rs_pidx;
1231                 /* Initialize the last processed descriptor to be the end of
1232                  * the ring, rather than the start, so that we avoid an
1233                  * off-by-one error when calculating how many descriptors are
1234                  * done in the credits_update function.
1235                  */
1236                 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1237                 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1238                         txr->tx_rsq[k] = QIDX_INVALID;
1239
1240                 /* Set Ring parameters */
1241                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1242                     (tdba & 0x00000000ffffffffULL));
1243                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1244                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1245                     scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
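                /* Disable relaxed ordering of descriptor write-back */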
1246                 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1247                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1248                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1249
1250                 /* Now enable */
1251                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1252                 txdctl |= IXGBE_TXDCTL_ENABLE;
1253                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1254         }
1255
1256         return;
1257 } /* ixv_initialize_transmit_units */
1258
1259 /************************************************************************
1260  * ixv_initialize_rss_mapping
1261  ************************************************************************/
1262 static void
1263 ixv_initialize_rss_mapping(struct adapter *adapter)
1264 {
1265         struct ixgbe_hw *hw = &adapter->hw;
1266         u32             reta = 0, mrqc, rss_key[10];
1267         int             queue_id;
1268         int             i, j;
1269         u32             rss_hash_config;
1270
1271         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1272                 /* Fetch the configured RSS key */
1273                 rss_getkey((uint8_t *)&rss_key);
1274         } else {
1275                 /* set up random bits */
1276                 arc4rand(&rss_key, sizeof(rss_key), 0);
1277         }
1278
1279         /* Now fill out hash function seeds */
1280         for (i = 0; i < 10; i++)
1281                 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1282
1283         /* Set up the redirection table */
1284         for (i = 0, j = 0; i < 64; i++, j++) {
1285                 if (j == adapter->num_rx_queues)
1286                         j = 0;
1287
1288                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1289                         /*
1290                          * Fetch the RSS bucket id for the given indirection
1291                          * entry. Cap it at the number of configured buckets
1292                          * (which is num_rx_queues.)
1293                          */
1294                         queue_id = rss_get_indirection_to_bucket(i);
1295                         queue_id = queue_id % adapter->num_rx_queues;
1296                 } else
1297                         queue_id = j;
1298
1299                 /*
1300                  * The low 8 bits are for hash value (n+0);
1301                  * The next 8 bits are for hash value (n+1), etc.
1302                  */
1303                 reta >>= 8;
1304                 reta |= ((uint32_t)queue_id) << 24;
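                /* Each 32-bit VFRETA register holds four entries; write it out every fourth iteration */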
1305                 if ((i & 3) == 3) {
1306                         IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1307                         reta = 0;
1308                 }
1309         }
1310
1311         /* Perform hash on these packet types */
1312         if (adapter->feat_en & IXGBE_FEATURE_RSS)
1313                 rss_hash_config = rss_gethashconfig();
1314         else {
1315                 /*
1316                  * Disable UDP - IP fragments aren't currently being handled
1317                  * and so we end up with a mix of 2-tuple and 4-tuple
1318                  * traffic.
1319                  */
1320                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1321                                 | RSS_HASHTYPE_RSS_TCP_IPV4
1322                                 | RSS_HASHTYPE_RSS_IPV6
1323                                 | RSS_HASHTYPE_RSS_TCP_IPV6;
1324         }
1325
1326         mrqc = IXGBE_MRQC_RSSEN;
1327         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1328                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1329         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1330                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1331         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1332                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1333         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1334                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1335         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1336                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1337                     __func__);
1338         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1339                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1340                     __func__);
1341         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1342                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1343         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1344                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1345         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1346                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1347                     __func__);
1348         IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1349 } /* ixv_initialize_rss_mapping */
1350
1351
1352 /************************************************************************
1353  * ixv_initialize_receive_units - Setup receive registers and features.
1354  ************************************************************************/
1355 static void
1356 ixv_initialize_receive_units(if_ctx_t ctx)
1357 {
1358         struct adapter     *adapter = iflib_get_softc(ctx);
1359         if_softc_ctx_t     scctx;
1360         struct ixgbe_hw    *hw = &adapter->hw;
1361         struct ifnet       *ifp = iflib_get_ifp(ctx);
1362         struct ix_rx_queue *que = adapter->rx_queues;
1363         u32                bufsz, psrtype;
1364
1365         if (ifp->if_mtu > ETHERMTU)
1366                 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1367         else
1368                 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1369
1370         psrtype = IXGBE_PSRTYPE_TCPHDR
1371                 | IXGBE_PSRTYPE_UDPHDR
1372                 | IXGBE_PSRTYPE_IPV4HDR
1373                 | IXGBE_PSRTYPE_IPV6HDR
1374                 | IXGBE_PSRTYPE_L2HDR;
1375
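        /* With more than one RX queue, set the RQPL field so RSS spreads across queues */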
1376         if (adapter->num_rx_queues > 1)
1377                 psrtype |= 1 << 29;
1378
1379         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1380
1381         /* Tell PF our max_frame size */
1382         if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1383                 device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
1384         }
1385         scctx = adapter->shared;
1386
1387         for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1388                 struct rx_ring *rxr = &que->rxr;
1389                 u64            rdba = rxr->rx_paddr;
1390                 u32            reg, rxdctl;
1391                 int            j = rxr->me;
1392
1393                 /* Disable the queue */
1394                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1395                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1396                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1397                 for (int k = 0; k < 10; k++) {
1398                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1399                             IXGBE_RXDCTL_ENABLE)
1400                                 msec_delay(1);
1401                         else
1402                                 break;
1403                 }
1404                 wmb();
1405                 /* Setup the Base and Length of the Rx Descriptor Ring */
1406                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1407                     (rdba & 0x00000000ffffffffULL));
1408                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
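                /* Ring length is programmed in bytes (descriptor count times descriptor size). */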
1409                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1410                     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1411
1412                 /* Reset the ring indices */
1413                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1414                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1415
1416                 /* Set up the SRRCTL register */
1417                 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1418                 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1419                 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1420                 reg |= bufsz;
1421                 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1422                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1423
1424                 /* Capture Rx Tail index */
1425                 rxr->tail = IXGBE_VFRDT(rxr->me);
1426
1427                 /* Do the queue enabling last */
1428                 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1429                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1430                 for (int l = 0; l < 10; l++) {
1431                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1432                             IXGBE_RXDCTL_ENABLE)
1433                                 break;
1434                         msec_delay(1);
1435                 }
1436                 wmb();
1437
1438                 /* Set the Tail Pointer */
1439 #ifdef DEV_NETMAP
1440                 /*
1441                  * In netmap mode, we must preserve the buffers made
1442                  * available to userspace before the if_init()
1443                  * (this is true by default on the TX side, because
1444                  * init makes all buffers available to userspace).
1445                  *
1446                  * netmap_reset() and the device specific routines
1447                  * (e.g. ixgbe_setup_receive_rings()) map these
1448                  * buffers at the end of the NIC ring, so here we
1449                  * must set the RDT (tail) register to make sure
1450                  * they are not overwritten.
1451                  *
1452                  * In this driver the NIC ring starts at RDH = 0,
1453                  * RDT points to the last slot available for reception (?),
1454                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1455                  */
1456                 if (ifp->if_capenable & IFCAP_NETMAP) {
1457                         struct netmap_adapter *na = NA(ifp);
1458                         struct netmap_kring *kring = na->rx_rings[j];
1459                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1460
1461                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1462                 } else
1463 #endif /* DEV_NETMAP */
1464                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1465                             scctx->isc_nrxd[0] - 1);
1466         }
1467
1468         ixv_initialize_rss_mapping(adapter);
1469 } /* ixv_initialize_receive_units */
1470
1471 /************************************************************************
1472  * ixv_setup_vlan_support
1473  ************************************************************************/
1474 static void
1475 ixv_setup_vlan_support(if_ctx_t ctx)
1476 {
1477         struct ifnet    *ifp = iflib_get_ifp(ctx);
1478         struct adapter  *adapter = iflib_get_softc(ctx);
1479         struct ixgbe_hw *hw = &adapter->hw;
1480         u32             ctrl, vid, vfta, retry;
1481
1482         /*
1483          * We get here through if_init(), i.e. after a soft
1484          * reset, which has already cleared the VFTA and other
1485          * state, so if no VLANs have been registered there is
1486          * nothing to do.
1487          */
1488         if (adapter->num_vlans == 0)
1489                 return;
1490
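        /*
         * Hardware VLAN tag stripping: set VME on each Rx queue and
         * flag each ring so the Rx path expects stripped tags.
         */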
1491         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1492                 /* Enable the queues */
1493                 for (int i = 0; i < adapter->num_rx_queues; i++) {
1494                         ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1495                         ctrl |= IXGBE_RXDCTL_VME;
1496                         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1497                         /*
1498                          * Let Rx path know that it needs to store VLAN tag
1499                          * as part of extra mbuf info.
1500                          */
1501                         adapter->rx_queues[i].rxr.vtag_strip = TRUE;
1502                 }
1503         }
1504
1505         /*
1506          * If filtering VLAN tags is disabled,
1507          * there is no need to fill VLAN Filter Table Array (VFTA).
1508          */
1509         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1510                 return;
1511
1512         /*
1513          * A soft reset zeroes out the VFTA, so
1514          * we need to repopulate it now.
1515          */
1516         for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1517                 if (ixv_shadow_vfta[i] == 0)
1518                         continue;
1519                 vfta = ixv_shadow_vfta[i];
1520                 /*
1521                  * Reconstruct the VLAN IDs from the
1522                  * bits set in each 32-bit word of
1523                  * the shadow array.
1524                  */
1525                 for (int j = 0; j < 32; j++) {
1526                         retry = 0;
1527                         if ((vfta & (1 << j)) == 0)
1528                                 continue;
1529                         vid = (i * 32) + j;
1530                         /* Call the shared code mailbox routine */
1531                         while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1532                                 if (++retry > 5)
1533                                         break;
1534                         }
1535                 }
1536         }
1537 } /* ixv_setup_vlan_support */
1538
1539 /************************************************************************
1540  * ixv_if_register_vlan
1541  *
1542  *   Run via a VLAN config EVENT; it lets us use the HW
1543  *   filter table since we are given the VLAN ID.  This only
1544  *   creates the entry in the soft (shadow) VFTA; init will
1545  *   repopulate the real table.
1546  ************************************************************************/
1547 static void
1548 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1549 {
1550         struct adapter *adapter = iflib_get_softc(ctx);
1551         u16            index, bit;
1552
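        /*
         * The shadow VFTA is an array of 32-bit words: a VLAN ID maps
         * to word (vtag >> 5) and bit (vtag & 0x1F).  For example,
         * VLAN 100 lands in word 3, bit 4.
         */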
1553         index = (vtag >> 5) & 0x7F;
1554         bit = vtag & 0x1F;
1555         ixv_shadow_vfta[index] |= (1 << bit);
1556         ++adapter->num_vlans;
1557 } /* ixv_if_register_vlan */
1558
1559 /************************************************************************
1560  * ixv_if_unregister_vlan
1561  *
1562  *   Run via a VLAN unconfig EVENT; remove our entry
1563  *   from the soft (shadow) VFTA.
1564  ************************************************************************/
1565 static void
1566 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1567 {
1568         struct adapter *adapter = iflib_get_softc(ctx);
1569         u16            index, bit;
1570
1571         index = (vtag >> 5) & 0x7F;
1572         bit = vtag & 0x1F;
1573         ixv_shadow_vfta[index] &= ~(1 << bit);
1574         --adapter->num_vlans;
1575 } /* ixv_if_unregister_vlan */
1576
1577 /************************************************************************
1578  * ixv_if_enable_intr
1579  ************************************************************************/
1580 static void
1581 ixv_if_enable_intr(if_ctx_t ctx)
1582 {
1583         struct adapter  *adapter = iflib_get_softc(ctx);
1584         struct ixgbe_hw *hw = &adapter->hw;
1585         struct ix_rx_queue *que = adapter->rx_queues;
1586         u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1587
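        /*
         * Unmask the non-queue causes via VTEIMS first; the per-queue
         * vectors are unmasked below through ixv_enable_queue().
         * VTEIAC then allows the queue causes to auto-clear.
         */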
1588         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1589
1590         mask = IXGBE_EIMS_ENABLE_MASK;
1591         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1592         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1593
1594         for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1595                 ixv_enable_queue(adapter, que->msix);
1596
1597         IXGBE_WRITE_FLUSH(hw);
1598 } /* ixv_if_enable_intr */
1599
1600 /************************************************************************
1601  * ixv_if_disable_intr
1602  ************************************************************************/
1603 static void
1604 ixv_if_disable_intr(if_ctx_t ctx)
1605 {
1606         struct adapter *adapter = iflib_get_softc(ctx);
1607         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1608         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1609         IXGBE_WRITE_FLUSH(&adapter->hw);
1610 } /* ixv_if_disable_intr */
1611
1612 /************************************************************************
1613  * ixv_if_rx_queue_intr_enable
1614  ************************************************************************/
1615 static int
1616 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1617 {
1618         struct adapter  *adapter = iflib_get_softc(ctx);
1619         struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1620
1621         ixv_enable_queue(adapter, que->rxr.me);
1622
1623         return (0);
1624 } /* ixv_if_rx_queue_intr_enable */
1625
1626 /************************************************************************
1627  * ixv_set_ivar
1628  *
1629  *   Setup the correct IVAR register for a particular MSI-X interrupt
1630  *    - entry is the register array entry
1631  *    - vector is the MSI-X vector for this queue
1632  *    - type is RX/TX/MISC
1633  ************************************************************************/
1634 static void
1635 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1636 {
1637         struct ixgbe_hw *hw = &adapter->hw;
1638         u32             ivar, index;
1639
1640         vector |= IXGBE_IVAR_ALLOC_VAL;
1641
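        /*
         * Each VTIVAR register carries two queue entries with one byte
         * per RX/TX cause: index = 16 * (entry & 1) + 8 * type.  For
         * example, entry 3 with type 1 (TX) selects VTIVAR(1), byte
         * offset 24.
         */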
1642         if (type == -1) { /* MISC IVAR */
1643                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1644                 ivar &= ~0xFF;
1645                 ivar |= vector;
1646                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1647         } else {          /* RX/TX IVARS */
1648                 index = (16 * (entry & 1)) + (8 * type);
1649                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1650                 ivar &= ~(0xFF << index);
1651                 ivar |= (vector << index);
1652                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1653         }
1654 } /* ixv_set_ivar */
1655
1656 /************************************************************************
1657  * ixv_configure_ivars
1658  ************************************************************************/
1659 static void
1660 ixv_configure_ivars(struct adapter *adapter)
1661 {
1662         struct ix_rx_queue *que = adapter->rx_queues;
1663
1664         MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1665
1666         for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1667                 /* First the RX queue entry */
1668                 ixv_set_ivar(adapter, i, que->msix, 0);
1669                 /* ... and the TX */
1670                 ixv_set_ivar(adapter, i, que->msix, 1);
1671                 /* Set an initial value in EITR */
1672                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1673                     IXGBE_EITR_DEFAULT);
1674         }
1675
1676         /* For the mailbox interrupt */
1677         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1678 } /* ixv_configure_ivars */
1679
1680 /************************************************************************
1681  * ixv_save_stats
1682  *
1683  *   The VF stats registers never start from a clean
1684  *   baseline, so this routine creates an artificial
1685  *   one, effectively marking ground zero at attach
1686  *   time.
1687  ************************************************************************/
1688 static void
1689 ixv_save_stats(struct adapter *adapter)
1690 {
1691         if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1692                 adapter->stats.vf.saved_reset_vfgprc +=
1693                     adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1694                 adapter->stats.vf.saved_reset_vfgptc +=
1695                     adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1696                 adapter->stats.vf.saved_reset_vfgorc +=
1697                     adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1698                 adapter->stats.vf.saved_reset_vfgotc +=
1699                     adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1700                 adapter->stats.vf.saved_reset_vfmprc +=
1701                     adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1702         }
1703 } /* ixv_save_stats */
1704
1705 /************************************************************************
1706  * ixv_init_stats
1707  ************************************************************************/
1708 static void
1709 ixv_init_stats(struct adapter *adapter)
1710 {
1711         struct ixgbe_hw *hw = &adapter->hw;
1712
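        /*
         * The good-octet counters are wider than 32 bits and split
         * across LSB/MSB registers; assemble the full value here.
         */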
1713         adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1714         adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1715         adapter->stats.vf.last_vfgorc |=
1716             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1717
1718         adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1719         adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1720         adapter->stats.vf.last_vfgotc |=
1721             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1722
1723         adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1724
1725         adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1726         adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1727         adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1728         adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1729         adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1730 } /* ixv_init_stats */
1731
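/*
 * The VF hardware counters are narrow, free-running registers, so each
 * update detects a wrap (current < last) and carries the overflow into
 * the wider software accumulator before merging in the current value.
 */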
1732 #define UPDATE_STAT_32(reg, last, count)                \
1733 {                                                       \
1734         u32 current = IXGBE_READ_REG(hw, reg);          \
1735         if (current < last)                             \
1736                 count += 0x100000000LL;                 \
1737         last = current;                                 \
1738         count &= 0xFFFFFFFF00000000LL;                  \
1739         count |= current;                               \
1740 }
1741
1742 #define UPDATE_STAT_36(lsb, msb, last, count)           \
1743 {                                                       \
1744         u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
1745         u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
1746         u64 current = ((cur_msb << 32) | cur_lsb);      \
1747         if (current < last)                             \
1748                 count += 0x1000000000LL;                \
1749         last = current;                                 \
1750         count &= 0xFFFFFFF000000000LL;                  \
1751         count |= current;                               \
1752 }
1753
1754 /************************************************************************
1755  * ixv_update_stats - Update the board statistics counters.
1756  ************************************************************************/
1757 void
1758 ixv_update_stats(struct adapter *adapter)
1759 {
1760         struct ixgbe_hw *hw = &adapter->hw;
1761         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1762
1763         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1764             adapter->stats.vf.vfgprc);
1765         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1766             adapter->stats.vf.vfgptc);
1767         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1768             adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1769         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1770             adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1771         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1772             adapter->stats.vf.vfmprc);
1773
1774         /* Fill out the OS statistics structure */
1775         IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1776         IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1777         IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1778         IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1779         IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1780 } /* ixv_update_stats */
1781
1782 /************************************************************************
1783  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1784  ************************************************************************/
1785 static void
1786 ixv_add_stats_sysctls(struct adapter *adapter)
1787 {
1788         device_t                dev = adapter->dev;
1789         struct ix_tx_queue      *tx_que = adapter->tx_queues;
1790         struct ix_rx_queue      *rx_que = adapter->rx_queues;
1791         struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1792         struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1793         struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1794         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1795         struct sysctl_oid       *stat_node, *queue_node;
1796         struct sysctl_oid_list  *stat_list, *queue_list;
1797
1798 #define QUEUE_NAME_LEN 32
1799         char                    namebuf[QUEUE_NAME_LEN];
1800
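        /*
         * These nodes are attached to the device's sysctl tree, so
         * they typically show up as dev.ixv.<unit>.<name> (e.g.
         * dev.ixv.0.mac.good_pkts_rcvd).
         */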
1801         /* Driver Statistics */
1802         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1803             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1804         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1805             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1806
1807         for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1808                 struct tx_ring *txr = &tx_que->txr;
1809                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1810                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1811                     CTLFLAG_RD, NULL, "Queue Name");
1812                 queue_list = SYSCTL_CHILDREN(queue_node);
1813
1814                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1815                     CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1816                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1817                     CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1818         }
1819
1820         for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1821                 struct rx_ring *rxr = &rx_que->rxr;
1822                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1823                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1824                     CTLFLAG_RD, NULL, "Queue Name");
1825                 queue_list = SYSCTL_CHILDREN(queue_node);
1826
1827                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1828                     CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1829                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1830                     CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1831                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1832                     CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1833                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1834                     CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1835         }
1836
1837         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1838             CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1839         stat_list = SYSCTL_CHILDREN(stat_node);
1840
1841         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1842             CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1843         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1844             CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1845         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1846             CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1847         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1848             CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1849         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1850             CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1851 } /* ixv_add_stats_sysctls */
1852
1853 /************************************************************************
1854  * ixv_print_debug_info
1855  *
1856  *   Called from ixv_sysctl_debug() when debug output is requested.
1857  *   Provides a way to take a look at important statistics
1858  *   maintained by the driver and hardware.
1859  ************************************************************************/
1860 static void
1861 ixv_print_debug_info(struct adapter *adapter)
1862 {
1863         device_t        dev = adapter->dev;
1864         struct ixgbe_hw *hw = &adapter->hw;
1865
1866         device_printf(dev, "Error Byte Count = %u\n",
1867             IXGBE_READ_REG(hw, IXGBE_ERRBC));
1868
1869         device_printf(dev, "MBX IRQ Handled: %lu\n", (u_long)adapter->link_irq);
1870 } /* ixv_print_debug_info */
1871
1872 /************************************************************************
1873  * ixv_sysctl_debug
1874  ************************************************************************/
1875 static int
1876 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1877 {
1878         struct adapter *adapter;
1879         int            error, result;
1880
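        /* Reads report a placeholder of -1; writing 1 triggers the debug dump below. */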
1881         result = -1;
1882         error = sysctl_handle_int(oidp, &result, 0, req);
1883
1884         if (error || !req->newptr)
1885                 return (error);
1886
1887         if (result == 1) {
1888                 adapter = (struct adapter *)arg1;
1889                 ixv_print_debug_info(adapter);
1890         }
1891
1892         return (error);
1893 } /* ixv_sysctl_debug */
1894
1895 /************************************************************************
1896  * ixv_init_device_features
1897  ************************************************************************/
1898 static void
1899 ixv_init_device_features(struct adapter *adapter)
1900 {
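        /*
         * feat_cap collects everything this VF driver could support;
         * feat_en, filled in below, is the subset actually enabled.
         */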
1901         adapter->feat_cap = IXGBE_FEATURE_NETMAP
1902                           | IXGBE_FEATURE_VF
1903                           | IXGBE_FEATURE_RSS
1904                           | IXGBE_FEATURE_LEGACY_TX;
1905
1906         /* VFs have relatively few feature flags at the moment. */
1907         switch (adapter->hw.mac.type) {
1908         case ixgbe_mac_82599_vf:
1909                 break;
1910         case ixgbe_mac_X540_vf:
1911                 break;
1912         case ixgbe_mac_X550_vf:
1913         case ixgbe_mac_X550EM_x_vf:
1914         case ixgbe_mac_X550EM_a_vf:
1915                 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1916                 break;
1917         default:
1918                 break;
1919         }
1920
1921         /* Enabled by default... */
1922         /* Is a virtual function (VF) */
1923         if (adapter->feat_cap & IXGBE_FEATURE_VF)
1924                 adapter->feat_en |= IXGBE_FEATURE_VF;
1925         /* Netmap */
1926         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1927                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1928         /* Receive-Side Scaling (RSS) */
1929         if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1930                 adapter->feat_en |= IXGBE_FEATURE_RSS;
1931         /* Needs advanced context descriptor regardless of offloads req'd */
1932         if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1933                 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1934 } /* ixv_init_device_features */
1935