/* if_ixv.c -- FreeBSD ixgbe(4) virtual-function (ixv) iflib driver. */
1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38
39 #include "ixgbe.h"
40 #include "ifdi_if.h"
41
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixv_vendor_info_array[] =
{
	/* VF device IDs for the 82599, X540, X550, X550EM-x and X550EM-a. */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	/* required last entry */
	PVID_END
};
69
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* iflib device-interface entry points (ifdi_*) */
static void	*ixv_register(device_t dev);
static int	ixv_if_attach_pre(if_ctx_t ctx);
static int	ixv_if_attach_post(if_ctx_t ctx);
static int	ixv_if_detach(if_ctx_t ctx);

static int	ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static int	ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static int	ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	ixv_if_queues_free(if_ctx_t ctx);
static void	ixv_identify_hardware(if_ctx_t ctx);
static void	ixv_init_device_features(struct adapter *);
static int	ixv_allocate_pci_resources(if_ctx_t ctx);
static void	ixv_free_pci_resources(if_ctx_t ctx);
static int	ixv_setup_interface(if_ctx_t ctx);
static void	ixv_if_media_status(if_ctx_t , struct ifmediareq *);
static int	ixv_if_media_change(if_ctx_t ctx);
static void	ixv_if_update_admin_status(if_ctx_t ctx);
static int	ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);

static int	ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	ixv_if_init(if_ctx_t ctx);
static void	ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
static void	ixv_if_stop(if_ctx_t ctx);
static int	ixv_negotiate_api(struct adapter *);

/* Hardware initialization helpers */
static void	ixv_initialize_transmit_units(if_ctx_t ctx);
static void	ixv_initialize_receive_units(if_ctx_t ctx);
static void	ixv_initialize_rss_mapping(struct adapter *);

static void	ixv_setup_vlan_support(if_ctx_t ctx);
static void	ixv_configure_ivars(struct adapter *);
static void	ixv_if_enable_intr(if_ctx_t ctx);
static void	ixv_if_disable_intr(if_ctx_t ctx);
static void	ixv_if_multi_set(if_ctx_t ctx);

static void	ixv_if_register_vlan(if_ctx_t, u16);
static void	ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t	ixv_if_get_counter(if_ctx_t, ift_counter);

/* Statistics handling */
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *adapter);

static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);

/* Multicast-list iterator handed to the shared code */
static u8	*ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);
126
/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
/* newbus device methods; probe/attach/detach are delegated to iflib. */
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
/* PNP table excludes the PVID_END sentinel (hence "- 1"). */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ixv, ixv_vendor_info_array,
    nitems(ixv_vendor_info_array) - 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
153
/* iflib device-interface method table: maps ifdi_* ops to this driver. */
static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	/*
	 * NOTE(review): the TX method is wired to the RX enable handler
	 * (ixv_if_rx_queue_intr_enable).  The VF shares one EIMS bit per
	 * queue pair, so this appears intentional -- confirm upstream.
	 */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
  "ixv_if", ixv_if_methods, sizeof(struct adapter)
};
183
/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
/* NOTE(review): not referenced in this chunk; possibly vestigial -- verify. */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
/* NOTE(review): not referenced in this chunk; possibly vestigial -- verify. */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
/* TX/RX descriptor processing ops shared with the PF driver (ix_txrx.c). */
extern struct if_txrx ixgbe_txrx;
208
/*
 * Shared-context template handed to iflib by ixv_register(): describes
 * DMA alignment, segment limits and descriptor-ring bounds for this NIC.
 */
static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,		/* one vector reserved for the mailbox */
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
236
/************************************************************************
 * ixv_register - device_register method
 *
 *   Returns the shared-context template so iflib can size and
 *   configure the softc before probe/attach run.
 ************************************************************************/
static void *
ixv_register(device_t dev)
{
	return (ixv_sctx);
}
242
243 /************************************************************************
244  * ixv_if_tx_queues_alloc
245  ************************************************************************/
246 static int
247 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
248                        int ntxqs, int ntxqsets)
249 {
250         struct adapter     *adapter = iflib_get_softc(ctx);
251         if_softc_ctx_t     scctx = adapter->shared;
252         struct ix_tx_queue *que;
253         int                i, j, error;
254
255         MPASS(adapter->num_tx_queues == ntxqsets);
256         MPASS(ntxqs == 1);
257
258         /* Allocate queue structure memory */
259         adapter->tx_queues =
260             (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
261                                          M_DEVBUF, M_NOWAIT | M_ZERO);
262         if (!adapter->tx_queues) {
263                 device_printf(iflib_get_dev(ctx),
264                     "Unable to allocate TX ring memory\n");
265                 return (ENOMEM);
266         }
267
268         for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
269                 struct tx_ring *txr = &que->txr;
270
271                 txr->me = i;
272                 txr->adapter =  que->adapter = adapter;
273                 adapter->active_queues |= (u64)1 << txr->me;
274
275                 /* Allocate report status array */
276                 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
277                         error = ENOMEM;
278                         goto fail;
279                 }
280                 for (j = 0; j < scctx->isc_ntxd[0]; j++)
281                         txr->tx_rsq[j] = QIDX_INVALID;
282                 /* get the virtual and physical address of the hardware queues */
283                 txr->tail = IXGBE_VFTDT(txr->me);
284                 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
285                 txr->tx_paddr = paddrs[i*ntxqs];
286
287                 txr->bytes = 0;
288                 txr->total_packets = 0;
289
290         }
291
292         device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
293             adapter->num_tx_queues);
294
295         return (0);
296
297  fail:
298         ixv_if_queues_free(ctx);
299
300         return (error);
301 } /* ixv_if_tx_queues_alloc */
302
303 /************************************************************************
304  * ixv_if_rx_queues_alloc
305  ************************************************************************/
306 static int
307 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
308                        int nrxqs, int nrxqsets)
309 {
310         struct adapter     *adapter = iflib_get_softc(ctx);
311         struct ix_rx_queue *que;
312         int                i, error;
313
314         MPASS(adapter->num_rx_queues == nrxqsets);
315         MPASS(nrxqs == 1);
316
317         /* Allocate queue structure memory */
318         adapter->rx_queues =
319             (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
320                                          M_DEVBUF, M_NOWAIT | M_ZERO);
321         if (!adapter->rx_queues) {
322                 device_printf(iflib_get_dev(ctx),
323                     "Unable to allocate TX ring memory\n");
324                 error = ENOMEM;
325                 goto fail;
326         }
327
328         for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
329                 struct rx_ring *rxr = &que->rxr;
330                 rxr->me = i;
331                 rxr->adapter = que->adapter = adapter;
332
333
334                 /* get the virtual and physical address of the hw queues */
335                 rxr->tail = IXGBE_VFRDT(rxr->me);
336                 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
337                 rxr->rx_paddr = paddrs[i*nrxqs];
338                 rxr->bytes = 0;
339                 rxr->que = que;
340         }
341
342         device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
343             adapter->num_rx_queues);
344
345         return (0);
346
347 fail:
348         ixv_if_queues_free(ctx);
349
350         return (error);
351 } /* ixv_if_rx_queues_alloc */
352
353 /************************************************************************
354  * ixv_if_queues_free
355  ************************************************************************/
356 static void
357 ixv_if_queues_free(if_ctx_t ctx)
358 {
359         struct adapter     *adapter = iflib_get_softc(ctx);
360         struct ix_tx_queue *que = adapter->tx_queues;
361         int                i;
362
363         if (que == NULL)
364                 goto free;
365
366         for (i = 0; i < adapter->num_tx_queues; i++, que++) {
367                 struct tx_ring *txr = &que->txr;
368                 if (txr->tx_rsq == NULL)
369                         break;
370
371                 free(txr->tx_rsq, M_DEVBUF);
372                 txr->tx_rsq = NULL;
373         }
374         if (adapter->tx_queues != NULL)
375                 free(adapter->tx_queues, M_DEVBUF);
376 free:
377         if (adapter->rx_queues != NULL)
378                 free(adapter->rx_queues, M_DEVBUF);
379         adapter->tx_queues = NULL;
380         adapter->rx_queues = NULL;
381 } /* ixv_if_queues_free */
382
383 /************************************************************************
384  * ixv_if_attach_pre - Device initialization routine
385  *
386  *   Called when the driver is being loaded.
387  *   Identifies the type of hardware, allocates all resources
388  *   and initializes the hardware.
389  *
390  *   return 0 on success, positive on failure
391  ************************************************************************/
392 static int
393 ixv_if_attach_pre(if_ctx_t ctx)
394 {
395         struct adapter  *adapter;
396         device_t        dev;
397         if_softc_ctx_t  scctx;
398         struct ixgbe_hw *hw;
399         int             error = 0;
400
401         INIT_DEBUGOUT("ixv_attach: begin");
402
403         /* Allocate, clear, and link in our adapter structure */
404         dev = iflib_get_dev(ctx);
405         adapter = iflib_get_softc(ctx);
406         adapter->dev = dev;
407         adapter->ctx = ctx;
408         adapter->hw.back = adapter;
409         scctx = adapter->shared = iflib_get_softc_ctx(ctx);
410         adapter->media = iflib_get_media(ctx);
411         hw = &adapter->hw;
412
413         /* Do base PCI setup - map BAR0 */
414         if (ixv_allocate_pci_resources(ctx)) {
415                 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
416                 error = ENXIO;
417                 goto err_out;
418         }
419
420         /* SYSCTL APIs */
421         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
422             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
423             CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
424             "Debug Info");
425
426         /* Determine hardware revision */
427         ixv_identify_hardware(ctx);
428         ixv_init_device_features(adapter);
429
430         /* Initialize the shared code */
431         error = ixgbe_init_ops_vf(hw);
432         if (error) {
433                 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
434                 error = EIO;
435                 goto err_out;
436         }
437
438         /* Setup the mailbox */
439         ixgbe_init_mbx_params_vf(hw);
440
441         error = hw->mac.ops.reset_hw(hw);
442         if (error == IXGBE_ERR_RESET_FAILED)
443                 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
444         else if (error)
445                 device_printf(dev, "...reset_hw() failed with error %d\n",
446                     error);
447         if (error) {
448                 error = EIO;
449                 goto err_out;
450         }
451
452         error = hw->mac.ops.init_hw(hw);
453         if (error) {
454                 device_printf(dev, "...init_hw() failed with error %d\n",
455                     error);
456                 error = EIO;
457                 goto err_out;
458         }
459
460         /* Negotiate mailbox API version */
461         error = ixv_negotiate_api(adapter);
462         if (error) {
463                 device_printf(dev,
464                     "Mailbox API negotiation failed during attach!\n");
465                 goto err_out;
466         }
467
468         /* If no mac address was assigned, make a random one */
469         if (!ixv_check_ether_addr(hw->mac.addr)) {
470                 u8 addr[ETHER_ADDR_LEN];
471                 arc4rand(&addr, sizeof(addr), 0);
472                 addr[0] &= 0xFE;
473                 addr[0] |= 0x02;
474                 bcopy(addr, hw->mac.addr, sizeof(addr));
475                 bcopy(addr, hw->mac.perm_addr, sizeof(addr));
476         }
477
478         /* Most of the iflib initialization... */
479
480         iflib_set_mac(ctx, hw->mac.addr);
481         switch (adapter->hw.mac.type) {
482         case ixgbe_mac_X550_vf:
483         case ixgbe_mac_X550EM_x_vf:
484         case ixgbe_mac_X550EM_a_vf:
485                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
486                 break;
487         default:
488                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
489         }
490         scctx->isc_txqsizes[0] =
491             roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
492             sizeof(u32), DBA_ALIGN);
493         scctx->isc_rxqsizes[0] =
494             roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
495             DBA_ALIGN);
496         /* XXX */
497         scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
498             CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
499         scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
500         scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
501         scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
502         scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
503         scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
504
505         scctx->isc_txrx = &ixgbe_txrx;
506
507         /*
508          * Tell the upper layer(s) we support everything the PF
509          * driver does except...
510          *   Wake-on-LAN
511          */
512         scctx->isc_capabilities = IXGBE_CAPS;
513         scctx->isc_capabilities ^= IFCAP_WOL;
514         scctx->isc_capenable = scctx->isc_capabilities;
515
516         INIT_DEBUGOUT("ixv_if_attach_pre: end");
517
518         return (0);
519
520 err_out:
521         ixv_free_pci_resources(ctx);
522
523         return (error);
524 } /* ixv_if_attach_pre */
525
526 static int
527 ixv_if_attach_post(if_ctx_t ctx)
528 {
529         struct adapter *adapter = iflib_get_softc(ctx);
530         device_t       dev = iflib_get_dev(ctx);
531         int            error = 0;
532
533         /* Setup OS specific network interface */
534         error = ixv_setup_interface(ctx);
535         if (error) {
536                 device_printf(dev, "Interface setup failed: %d\n", error);
537                 goto end;
538         }
539
540         /* Do the stats setup */
541         ixv_save_stats(adapter);
542         ixv_init_stats(adapter);
543         ixv_add_stats_sysctls(adapter);
544
545 end:
546         return error;
547 } /* ixv_if_attach_post */
548
549 /************************************************************************
550  * ixv_detach - Device removal routine
551  *
552  *   Called when the driver is being removed.
553  *   Stops the adapter and deallocates all the resources
554  *   that were allocated for driver operation.
555  *
556  *   return 0 on success, positive on failure
557  ************************************************************************/
/************************************************************************
 * ixv_if_detach - Device removal routine
 *
 *   Called when the driver is being removed; iflib tears down the
 *   queues and interface itself, so only the PCI resources mapped
 *   in attach_pre need releasing here.  Always succeeds.
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */
567
568 /************************************************************************
569  * ixv_if_mtu_set
570  ************************************************************************/
571 static int
572 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
573 {
574         struct adapter *adapter = iflib_get_softc(ctx);
575         struct ifnet   *ifp = iflib_get_ifp(ctx);
576         int            error = 0;
577
578         IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
579         if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
580                 error = EINVAL;
581         } else {
582                 ifp->if_mtu = mtu;
583                 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
584         }
585
586         return error;
587 } /* ixv_if_mtu_set */
588
589 /************************************************************************
590  * ixv_if_init - Init entry point
591  *
592  *   Used in two ways: It is used by the stack as an init entry
593  *   point in network interface structure. It is also used
594  *   by the driver as a hw/sw initialization routine to get
595  *   to a consistent state.
596  *
597  *   return 0 on success, positive on failure
598  ************************************************************************/
/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   The hardware bring-up ordering below (stop, RAR, reset,
 *   mailbox renegotiation, TX units, multicast, RX units, VLAN,
 *   IVARs, interrupts) is deliberate; do not reorder casually.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	/* Clear the stopped flag first so stop_adapter() runs its full path. */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	/* NOTE(review): reset_hw/start_hw return values are ignored here,
	 * unlike in attach_pre -- confirm this is intentional. */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */
670
671 /************************************************************************
672  * ixv_enable_queue
673  ************************************************************************/
674 static inline void
675 ixv_enable_queue(struct adapter *adapter, u32 vector)
676 {
677         struct ixgbe_hw *hw = &adapter->hw;
678         u32             queue = 1 << vector;
679         u32             mask;
680
681         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
682         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
683 } /* ixv_enable_queue */
684
685 /************************************************************************
686  * ixv_disable_queue
687  ************************************************************************/
688 static inline void
689 ixv_disable_queue(struct adapter *adapter, u32 vector)
690 {
691         struct ixgbe_hw *hw = &adapter->hw;
692         u64             queue = (u64)(1 << vector);
693         u32             mask;
694
695         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
696         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
697 } /* ixv_disable_queue */
698
699
700 /************************************************************************
701  * ixv_msix_que - MSI-X Queue Interrupt Service routine
702  ************************************************************************/
703 static int
704 ixv_msix_que(void *arg)
705 {
706         struct ix_rx_queue *que = arg;
707         struct adapter     *adapter = que->adapter;
708
709         ixv_disable_queue(adapter, que->msix);
710         ++que->irqs;
711
712         return (FILTER_SCHEDULE_THREAD);
713 } /* ixv_msix_que */
714
715 /************************************************************************
716  * ixv_msix_mbx
717  ************************************************************************/
718 static int
719 ixv_msix_mbx(void *arg)
720 {
721         struct adapter  *adapter = arg;
722         struct ixgbe_hw *hw = &adapter->hw;
723         u32             reg;
724
725         ++adapter->link_irq;
726
727         /* First get the cause */
728         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
729         /* Clear interrupt with write */
730         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
731
732         /* Link status change */
733         if (reg & IXGBE_EICR_LSC)
734                 iflib_admin_intr_deferred(adapter->ctx);
735
736         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
737
738         return (FILTER_HANDLED);
739 } /* ixv_msix_mbx */
740
741 /************************************************************************
742  * ixv_media_status - Media Ioctl callback
743  *
744  *   Called whenever the user queries the status of
745  *   the interface using ifconfig.
746  ************************************************************************/
747 static void
748 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
749 {
750         struct adapter *adapter = iflib_get_softc(ctx);
751
752         INIT_DEBUGOUT("ixv_media_status: begin");
753
754         iflib_admin_intr_deferred(ctx);
755
756         ifmr->ifm_status = IFM_AVALID;
757         ifmr->ifm_active = IFM_ETHER;
758
759         if (!adapter->link_active)
760                 return;
761
762         ifmr->ifm_status |= IFM_ACTIVE;
763
764         switch (adapter->link_speed) {
765                 case IXGBE_LINK_SPEED_1GB_FULL:
766                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
767                         break;
768                 case IXGBE_LINK_SPEED_10GB_FULL:
769                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
770                         break;
771                 case IXGBE_LINK_SPEED_100_FULL:
772                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
773                         break;
774                 case IXGBE_LINK_SPEED_10_FULL:
775                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
776                         break;
777         }
778 } /* ixv_if_media_status */
779
780 /************************************************************************
781  * ixv_if_media_change - Media Ioctl callback
782  *
783  *   Called when the user changes speed/duplex using
784  *   media/mediopt option with ifconfig.
785  ************************************************************************/
786 static int
787 ixv_if_media_change(if_ctx_t ctx)
788 {
789         struct adapter *adapter = iflib_get_softc(ctx);
790         struct ifmedia *ifm = iflib_get_media(ctx);
791
792         INIT_DEBUGOUT("ixv_media_change: begin");
793
794         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
795                 return (EINVAL);
796
797         switch (IFM_SUBTYPE(ifm->ifm_media)) {
798         case IFM_AUTO:
799                 break;
800         default:
801                 device_printf(adapter->dev, "Only auto media type\n");
802                 return (EINVAL);
803         }
804
805         return (0);
806 } /* ixv_if_media_change */
807
808
809 /************************************************************************
810  * ixv_negotiate_api
811  *
812  *   Negotiate the Mailbox API with the PF;
813  *   start with the most featured API first.
814  ************************************************************************/
815 static int
816 ixv_negotiate_api(struct adapter *adapter)
817 {
818         struct ixgbe_hw *hw = &adapter->hw;
819         int             mbx_api[] = { ixgbe_mbox_api_11,
820                                       ixgbe_mbox_api_10,
821                                       ixgbe_mbox_api_unknown };
822         int             i = 0;
823
824         while (mbx_api[i] != ixgbe_mbox_api_unknown) {
825                 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
826                         return (0);
827                 i++;
828         }
829
830         return (EINVAL);
831 } /* ixv_negotiate_api */
832
833
834 /************************************************************************
835  * ixv_if_multi_set - Multicast Update
836  *
837  *   Called whenever multicast address list is updated.
838  ************************************************************************/
839 static void
840 ixv_if_multi_set(if_ctx_t ctx)
841 {
842         u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
843         struct adapter     *adapter = iflib_get_softc(ctx);
844         u8                 *update_ptr;
845         struct ifmultiaddr *ifma;
846         if_t               ifp = iflib_get_ifp(ctx);
847         int                mcnt = 0;
848
849         IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
850
851         CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
852                 if (ifma->ifma_addr->sa_family != AF_LINK)
853                         continue;
854                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
855                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
856                     IXGBE_ETH_LENGTH_OF_ADDRESS);
857                 mcnt++;
858         }
859
860         update_ptr = mta;
861
862         adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
863             ixv_mc_array_itr, TRUE);
864 } /* ixv_if_multi_set */
865
866 /************************************************************************
867  * ixv_mc_array_itr
868  *
869  *   An iterator function needed by the multicast shared code.
870  *   It feeds the shared code routine the addresses in the
871  *   array of ixv_set_multi() one by one.
872  ************************************************************************/
873 static u8 *
874 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
875 {
876         u8 *addr = *update_ptr;
877         u8 *newptr;
878
879         *vmdq = 0;
880
881         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
882         *update_ptr = newptr;
883
884         return addr;
885 } /* ixv_mc_array_itr */
886
887 /************************************************************************
888  * ixv_if_local_timer - Timer routine
889  *
890  *   Checks for link status, updates statistics,
891  *   and runs the watchdog check.
892  ************************************************************************/
893 static void
894 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
895 {
896         if (qid != 0)
897                 return;
898
899         /* Fire off the adminq task */
900         iflib_admin_intr_deferred(ctx);
901 } /* ixv_if_local_timer */
902
/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	s32            status;

	/* Force the shared code to re-read link state from the mailbox. */
	adapter->hw.mac.get_link_status = TRUE;

	/* Last argument FALSE: do not block waiting for link-up. */
	status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);

	if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		iflib_get_ifp(ctx)->if_init(ctx);
	}

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* NOTE(review): the baudrate reported to iflib is
			 * hard-coded to 10 Gbps even when the printf above
			 * decided the link is 1 Gbps — confirm whether this
			 * should use the detected speed instead. */
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN,  0);
			adapter->link_active = FALSE;
		}
	}

	/* Stats Update */
	ixv_update_stats(adapter);
} /* ixv_if_update_admin_status */
950
951
/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	/* Mask interrupts before touching the MAC. */
	ixv_if_disable_intr(ctx);

	hw->mac.ops.reset_hw(hw);
	/* NOTE(review): adapter_stopped is cleared *before* calling
	 * stop_adapter(); presumably the shared-code stop routine is a
	 * no-op when the flag is already set — confirm against the
	 * shared code. */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */
979
980
981 /************************************************************************
982  * ixv_identify_hardware - Determine hardware revision.
983  ************************************************************************/
984 static void
985 ixv_identify_hardware(if_ctx_t ctx)
986 {
987         struct adapter  *adapter = iflib_get_softc(ctx);
988         device_t        dev = iflib_get_dev(ctx);
989         struct ixgbe_hw *hw = &adapter->hw;
990
991         /* Save off the information about this board */
992         hw->vendor_id = pci_get_vendor(dev);
993         hw->device_id = pci_get_device(dev);
994         hw->revision_id = pci_get_revid(dev);
995         hw->subsystem_vendor_id = pci_get_subvendor(dev);
996         hw->subsystem_device_id = pci_get_subdevice(dev);
997
998         /* A subset of set_mac_type */
999         switch (hw->device_id) {
1000         case IXGBE_DEV_ID_82599_VF:
1001                 hw->mac.type = ixgbe_mac_82599_vf;
1002                 break;
1003         case IXGBE_DEV_ID_X540_VF:
1004                 hw->mac.type = ixgbe_mac_X540_vf;
1005                 break;
1006         case IXGBE_DEV_ID_X550_VF:
1007                 hw->mac.type = ixgbe_mac_X550_vf;
1008                 break;
1009         case IXGBE_DEV_ID_X550EM_X_VF:
1010                 hw->mac.type = ixgbe_mac_X550EM_x_vf;
1011                 break;
1012         case IXGBE_DEV_ID_X550EM_A_VF:
1013                 hw->mac.type = ixgbe_mac_X550EM_a_vf;
1014                 break;
1015         default:
1016                 device_printf(dev, "unknown mac type\n");
1017                 hw->mac.type = ixgbe_mac_unknown;
1018                 break;
1019         }
1020 } /* ixv_identify_hardware */
1021
1022 /************************************************************************
1023  * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1024  ************************************************************************/
1025 static int
1026 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1027 {
1028         struct adapter     *adapter = iflib_get_softc(ctx);
1029         device_t           dev = iflib_get_dev(ctx);
1030         struct ix_rx_queue *rx_que = adapter->rx_queues;
1031         struct ix_tx_queue *tx_que;
1032         int                error, rid, vector = 0;
1033         char               buf[16];
1034
1035         for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1036                 rid = vector + 1;
1037
1038                 snprintf(buf, sizeof(buf), "rxq%d", i);
1039                 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1040                     IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1041
1042                 if (error) {
1043                         device_printf(iflib_get_dev(ctx),
1044                             "Failed to allocate que int %d err: %d", i, error);
1045                         adapter->num_rx_queues = i + 1;
1046                         goto fail;
1047                 }
1048
1049                 rx_que->msix = vector;
1050                 adapter->active_queues |= (u64)(1 << rx_que->msix);
1051
1052         }
1053
1054         for (int i = 0; i < adapter->num_tx_queues; i++) {
1055                 snprintf(buf, sizeof(buf), "txq%d", i);
1056                 tx_que = &adapter->tx_queues[i];
1057                 tx_que->msix = i % adapter->num_rx_queues;
1058                 iflib_softirq_alloc_generic(ctx,
1059                     &adapter->rx_queues[tx_que->msix].que_irq,
1060                     IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1061         }
1062         rid = vector + 1;
1063         error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
1064             IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
1065         if (error) {
1066                 device_printf(iflib_get_dev(ctx),
1067                     "Failed to register admin handler");
1068                 return (error);
1069         }
1070
1071         adapter->vector = vector;
1072         /*
1073          * Due to a broken design QEMU will fail to properly
1074          * enable the guest for MSIX unless the vectors in
1075          * the table are all set up, so we must rewrite the
1076          * ENABLE in the MSIX control register again at this
1077          * point to cause it to successfully initialize us.
1078          */
1079         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1080                 int msix_ctrl;
1081                 pci_find_cap(dev, PCIY_MSIX, &rid);
1082                 rid += PCIR_MSIX_CTRL;
1083                 msix_ctrl = pci_read_config(dev, rid, 2);
1084                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1085                 pci_write_config(dev, rid, msix_ctrl, 2);
1086         }
1087
1088         return (0);
1089
1090 fail:
1091         iflib_irq_free(ctx, &adapter->irq);
1092         rx_que = adapter->rx_queues;
1093         for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
1094                 iflib_irq_free(ctx, &rx_que->que_irq);
1095
1096         return (error);
1097 } /* ixv_if_msix_intr_assign */
1098
1099 /************************************************************************
1100  * ixv_allocate_pci_resources
1101  ************************************************************************/
1102 static int
1103 ixv_allocate_pci_resources(if_ctx_t ctx)
1104 {
1105         struct adapter *adapter = iflib_get_softc(ctx);
1106         device_t       dev = iflib_get_dev(ctx);
1107         int            rid;
1108
1109         rid = PCIR_BAR(0);
1110         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1111             RF_ACTIVE);
1112
1113         if (!(adapter->pci_mem)) {
1114                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1115                 return (ENXIO);
1116         }
1117
1118         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1119         adapter->osdep.mem_bus_space_handle =
1120             rman_get_bushandle(adapter->pci_mem);
1121         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1122
1123         return (0);
1124 } /* ixv_allocate_pci_resources */
1125
1126 /************************************************************************
1127  * ixv_free_pci_resources
1128  ************************************************************************/
1129 static void
1130 ixv_free_pci_resources(if_ctx_t ctx)
1131 {
1132         struct adapter     *adapter = iflib_get_softc(ctx);
1133         struct ix_rx_queue *que = adapter->rx_queues;
1134         device_t           dev = iflib_get_dev(ctx);
1135
1136         /* Release all msix queue resources */
1137         if (adapter->intr_type == IFLIB_INTR_MSIX)
1138                 iflib_irq_free(ctx, &adapter->irq);
1139
1140         if (que != NULL) {
1141                 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1142                         iflib_irq_free(ctx, &que->que_irq);
1143                 }
1144         }
1145
1146         /* Clean the Legacy or Link interrupt last */
1147         if (adapter->pci_mem != NULL)
1148                 bus_release_resource(dev, SYS_RES_MEMORY,
1149                                      PCIR_BAR(0), adapter->pci_mem);
1150 } /* ixv_free_pci_resources */
1151
1152 /************************************************************************
1153  * ixv_setup_interface
1154  *
1155  *   Setup networking device structure and register an interface.
1156  ************************************************************************/
1157 static int
1158 ixv_setup_interface(if_ctx_t ctx)
1159 {
1160         struct adapter *adapter = iflib_get_softc(ctx);
1161         if_softc_ctx_t scctx = adapter->shared;
1162         struct ifnet   *ifp = iflib_get_ifp(ctx);
1163
1164         INIT_DEBUGOUT("ixv_setup_interface: begin");
1165
1166         if_setbaudrate(ifp, IF_Gbps(10));
1167         ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1168
1169
1170         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1171         ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1172         ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1173
1174         return 0;
1175 } /* ixv_setup_interface */
1176
1177 /************************************************************************
1178  * ixv_if_get_counter
1179  ************************************************************************/
1180 static uint64_t
1181 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1182 {
1183         struct adapter *adapter = iflib_get_softc(ctx);
1184         if_t           ifp = iflib_get_ifp(ctx);
1185
1186         switch (cnt) {
1187         case IFCOUNTER_IPACKETS:
1188                 return (adapter->ipackets);
1189         case IFCOUNTER_OPACKETS:
1190                 return (adapter->opackets);
1191         case IFCOUNTER_IBYTES:
1192                 return (adapter->ibytes);
1193         case IFCOUNTER_OBYTES:
1194                 return (adapter->obytes);
1195         case IFCOUNTER_IMCASTS:
1196                 return (adapter->imcasts);
1197         default:
1198                 return (if_get_counter_default(ifp, cnt));
1199         }
1200 } /* ixv_if_get_counter */
1201
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 *   Programs the VF TX descriptor ring registers (base, length, head,
 *   tail, write-back threshold) for every TX queue and then enables
 *   each ring.  Register programming order follows the existing code;
 *   the enable bit is set last for each ring.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &adapter->hw;
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que = adapter->tx_queues;
	int                i;

	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64            tdba = txr->tx_paddr;	/* ring DMA base */
		u32            txctrl, txdctl;
		int            j = txr->me;		/* hardware ring index */

		/* Set WTHRESH to 8, burst writeback */
		/* (8 << 16) places 8 in the WTHRESH field of TXDCTL. */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		/* Reset the software report-status bookkeeping and mark
		 * every descriptor slot as having no RS request pending. */
		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering for descriptor write-back. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1254
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 *   Programs the VF RSS registers: the 10-word hash key (VFRSSRK),
 *   the 64-entry redirection table (VFRETA, packed 4 entries per
 *   32-bit register), and the hash-type enable bits (VFMRQC).  When
 *   the kernel RSS feature is enabled the key, indirection and hash
 *   configuration come from the stack; otherwise a random key and a
 *   simple round-robin table are used.
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	/* j cycles 0..num_rx_queues-1 to spread entries round-robin. */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_rx_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		/* Every fourth entry completes one VFRETA register. */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the stack's hash types into MRQC field bits;
	 * unsupported EX types are reported but not enabled. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1346
1347
1348 /************************************************************************
1349  * ixv_initialize_receive_units - Setup receive registers and features.
1350  ************************************************************************/
1351 static void
1352 ixv_initialize_receive_units(if_ctx_t ctx)
1353 {
1354         struct adapter     *adapter = iflib_get_softc(ctx);
1355         if_softc_ctx_t     scctx;
1356         struct ixgbe_hw    *hw = &adapter->hw;
1357         struct ifnet       *ifp = iflib_get_ifp(ctx);
1358         struct ix_rx_queue *que = adapter->rx_queues;
1359         u32                bufsz, psrtype;
1360
1361         if (ifp->if_mtu > ETHERMTU)
1362                 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1363         else
1364                 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1365
1366         psrtype = IXGBE_PSRTYPE_TCPHDR
1367                 | IXGBE_PSRTYPE_UDPHDR
1368                 | IXGBE_PSRTYPE_IPV4HDR
1369                 | IXGBE_PSRTYPE_IPV6HDR
1370                 | IXGBE_PSRTYPE_L2HDR;
1371
1372         if (adapter->num_rx_queues > 1)
1373                 psrtype |= 1 << 29;
1374
1375         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1376
1377         /* Tell PF our max_frame size */
1378         if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1379                 device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
1380         }
1381         scctx = adapter->shared;
1382
1383         for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1384                 struct rx_ring *rxr = &que->rxr;
1385                 u64            rdba = rxr->rx_paddr;
1386                 u32            reg, rxdctl;
1387                 int            j = rxr->me;
1388
1389                 /* Disable the queue */
1390                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1391                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1392                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1393                 for (int k = 0; k < 10; k++) {
1394                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1395                             IXGBE_RXDCTL_ENABLE)
1396                                 msec_delay(1);
1397                         else
1398                                 break;
1399                 }
1400                 wmb();
1401                 /* Setup the Base and Length of the Rx Descriptor Ring */
1402                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1403                     (rdba & 0x00000000ffffffffULL));
1404                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1405                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1406                     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1407
1408                 /* Reset the ring indices */
1409                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1410                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1411
1412                 /* Set up the SRRCTL register */
1413                 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1414                 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1415                 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1416                 reg |= bufsz;
1417                 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1418                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1419
1420                 /* Capture Rx Tail index */
1421                 rxr->tail = IXGBE_VFRDT(rxr->me);
1422
1423                 /* Do the queue enabling last */
1424                 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1425                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1426                 for (int l = 0; l < 10; l++) {
1427                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1428                             IXGBE_RXDCTL_ENABLE)
1429                                 break;
1430                         msec_delay(1);
1431                 }
1432                 wmb();
1433
1434                 /* Set the Tail Pointer */
1435 #ifdef DEV_NETMAP
1436                 /*
1437                  * In netmap mode, we must preserve the buffers made
1438                  * available to userspace before the if_init()
1439                  * (this is true by default on the TX side, because
1440                  * init makes all buffers available to userspace).
1441                  *
1442                  * netmap_reset() and the device specific routines
1443                  * (e.g. ixgbe_setup_receive_rings()) map these
1444                  * buffers at the end of the NIC ring, so here we
1445                  * must set the RDT (tail) register to make sure
1446                  * they are not overwritten.
1447                  *
1448                  * In this driver the NIC ring starts at RDH = 0,
1449                  * RDT points to the last slot available for reception (?),
1450                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1451                  */
1452                 if (ifp->if_capenable & IFCAP_NETMAP) {
1453                         struct netmap_adapter *na = NA(ifp);
1454                         struct netmap_kring *kring = na->rx_rings[j];
1455                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1456
1457                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1458                 } else
1459 #endif /* DEV_NETMAP */
1460                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1461                             scctx->isc_nrxd[0] - 1);
1462         }
1463
1464         ixv_initialize_rss_mapping(adapter);
1465 } /* ixv_initialize_receive_units */
1466
/************************************************************************
 * ixv_setup_vlan_support
 *
 *   Re-applies VLAN state after an (soft) reset: re-enables VLAN tag
 *   stripping on the RX rings when HWTAGGING is on, and repopulates
 *   the hardware VLAN filter table from the software shadow copy via
 *   the PF mailbox when HWFILTER is on.
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32             ctrl, vid, vfta, retry;

	/*
	 * We get here thru if_init, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < adapter->num_rx_queues; i++) {
			/* VME bit in RXDCTL turns on hardware tag stripping. */
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			adapter->rx_queues[i].rxr.vtag_strip = TRUE;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill VLAN Filter Table Array (VFTA).
	 */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Retry up to 5 times; the mailbox to the PF can
			 * transiently fail.  Failures past the retry limit
			 * are silently dropped. */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1534
1535 /************************************************************************
1536  * ixv_if_register_vlan
1537  *
1538  *   Run via a vlan config EVENT, it enables us to use the
1539  *   HW Filter table since we can get the vlan id. This just
1540  *   creates the entry in the soft version of the VFTA, init
1541  *   will repopulate the real table.
1542  ************************************************************************/
1543 static void
1544 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1545 {
1546         struct adapter *adapter = iflib_get_softc(ctx);
1547         u16            index, bit;
1548
1549         index = (vtag >> 5) & 0x7F;
1550         bit = vtag & 0x1F;
1551         ixv_shadow_vfta[index] |= (1 << bit);
1552         ++adapter->num_vlans;
1553 } /* ixv_if_register_vlan */
1554
1555 /************************************************************************
1556  * ixv_if_unregister_vlan
1557  *
1558  *   Run via a vlan unconfig EVENT, remove our entry
1559  *   in the soft vfta.
1560  ************************************************************************/
1561 static void
1562 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1563 {
1564         struct adapter *adapter = iflib_get_softc(ctx);
1565         u16            index, bit;
1566
1567         index = (vtag >> 5) & 0x7F;
1568         bit = vtag & 0x1F;
1569         ixv_shadow_vfta[index] &= ~(1 << bit);
1570         --adapter->num_vlans;
1571 } /* ixv_if_unregister_vlan */
1572
1573 /************************************************************************
1574  * ixv_if_enable_intr
1575  ************************************************************************/
1576 static void
1577 ixv_if_enable_intr(if_ctx_t ctx)
1578 {
1579         struct adapter  *adapter = iflib_get_softc(ctx);
1580         struct ixgbe_hw *hw = &adapter->hw;
1581         struct ix_rx_queue *que = adapter->rx_queues;
1582         u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1583
1584         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1585
1586         mask = IXGBE_EIMS_ENABLE_MASK;
1587         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1588         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1589
1590         for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1591                 ixv_enable_queue(adapter, que->msix);
1592
1593         IXGBE_WRITE_FLUSH(hw);
1594 } /* ixv_if_enable_intr */
1595
1596 /************************************************************************
1597  * ixv_if_disable_intr
1598  ************************************************************************/
1599 static void
1600 ixv_if_disable_intr(if_ctx_t ctx)
1601 {
1602         struct adapter *adapter = iflib_get_softc(ctx);
1603         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1604         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1605         IXGBE_WRITE_FLUSH(&adapter->hw);
1606 } /* ixv_if_disable_intr */
1607
1608 /************************************************************************
1609  * ixv_if_rx_queue_intr_enable
1610  ************************************************************************/
1611 static int
1612 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1613 {
1614         struct adapter  *adapter = iflib_get_softc(ctx);
1615         struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1616
1617         ixv_enable_queue(adapter, que->rxr.me);
1618
1619         return (0);
1620 } /* ixv_if_rx_queue_intr_enable */
1621
/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX/TX/MISC (0 = RX, 1 = TX, -1 = mailbox/other)
 ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             ivar, index;

	/* Hardware only honors an IVAR entry with the valid bit set */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		/* The misc (mailbox) vector occupies the low byte */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {          /* RX/TX IVARS */
		/*
		 * Each 32-bit VTIVAR register packs two queue entries
		 * (entry >> 1 selects the register, entry & 1 the half);
		 * within a half, the RX byte is at bit offset 0 and the
		 * TX byte at bit offset 8.  Read-modify-write just the
		 * target byte.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
1651
1652 /************************************************************************
1653  * ixv_configure_ivars
1654  ************************************************************************/
1655 static void
1656 ixv_configure_ivars(struct adapter *adapter)
1657 {
1658         struct ix_rx_queue *que = adapter->rx_queues;
1659
1660         MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1661
1662         for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1663                 /* First the RX queue entry */
1664                 ixv_set_ivar(adapter, i, que->msix, 0);
1665                 /* ... and the TX */
1666                 ixv_set_ivar(adapter, i, que->msix, 1);
1667                 /* Set an initial value in EITR */
1668                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1669                     IXGBE_EITR_DEFAULT);
1670         }
1671
1672         /* For the mailbox interrupt */
1673         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1674 } /* ixv_configure_ivars */
1675
1676 /************************************************************************
1677  * ixv_save_stats
1678  *
1679  *   The VF stats registers never have a truly virgin
1680  *   starting point, so this routine tries to make an
1681  *   artificial one, marking ground zero on attach as
1682  *   it were.
1683  ************************************************************************/
1684 static void
1685 ixv_save_stats(struct adapter *adapter)
1686 {
1687         if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1688                 adapter->stats.vf.saved_reset_vfgprc +=
1689                     adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1690                 adapter->stats.vf.saved_reset_vfgptc +=
1691                     adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1692                 adapter->stats.vf.saved_reset_vfgorc +=
1693                     adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1694                 adapter->stats.vf.saved_reset_vfgotc +=
1695                     adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1696                 adapter->stats.vf.saved_reset_vfmprc +=
1697                     adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1698         }
1699 } /* ixv_save_stats */
1700
1701 /************************************************************************
1702  * ixv_init_stats
1703  ************************************************************************/
1704 static void
1705 ixv_init_stats(struct adapter *adapter)
1706 {
1707         struct ixgbe_hw *hw = &adapter->hw;
1708
1709         adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1710         adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1711         adapter->stats.vf.last_vfgorc |=
1712             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1713
1714         adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1715         adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1716         adapter->stats.vf.last_vfgotc |=
1717             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1718
1719         adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1720
1721         adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1722         adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1723         adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1724         adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1725         adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1726 } /* ixv_init_stats */
1727
/*
 * UPDATE_STAT_32 - fold a 32-bit wrapping hardware counter into a 64-bit
 * software accumulator.  When the fresh reading is below the previous one
 * the counter wrapped, so a 2^32 carry is added before the low 32 bits of
 * the accumulator are replaced with the current reading.  Expects an
 * `hw` (struct ixgbe_hw *) in scope at the expansion site.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and composes safely with if/else (the bare-brace form is the
 * classic dangling-else / multi-statement macro hazard).
 */
#define UPDATE_STAT_32(reg, last, count)                \
do {                                                    \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last)                             \
		count += 0x100000000LL;                 \
	last = current;                                 \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
} while (0)
1737
/*
 * UPDATE_STAT_36 - same wrap accounting as UPDATE_STAT_32, but for the
 * 36-bit octet counters that the hardware exposes as an LSB/MSB register
 * pair; a wrap adds a 2^36 carry and the low 36 bits of the accumulator
 * are replaced with the current reading.  Expects `hw` in scope.
 *
 * do { } while (0) keeps the expansion a single statement (see
 * UPDATE_STAT_32).
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
do {                                                    \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last)                             \
		count += 0x1000000000LL;                \
	last = current;                                 \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
} while (0)
1749
1750 /************************************************************************
1751  * ixv_update_stats - Update the board statistics counters.
1752  ************************************************************************/
1753 void
1754 ixv_update_stats(struct adapter *adapter)
1755 {
1756         struct ixgbe_hw *hw = &adapter->hw;
1757         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1758
1759         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1760             adapter->stats.vf.vfgprc);
1761         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1762             adapter->stats.vf.vfgptc);
1763         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1764             adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1765         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1766             adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1767         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1768             adapter->stats.vf.vfmprc);
1769
1770         /* Fill out the OS statistics structure */
1771         IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1772         IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1773         IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1774         IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1775         IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1776 } /* ixv_update_stats */
1777
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 *   Registers driver-level, per-queue, and MAC hardware statistics
 *   counters under this device's sysctl tree.  The counter storage is
 *   updated elsewhere (ixv_update_stats()); only read-only handles are
 *   exported here.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t                dev = adapter->dev;
	struct ix_tx_queue      *tx_que = adapter->tx_queues;
	struct ix_rx_queue      *rx_que = adapter->rx_queues;
	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct sysctl_oid       *stat_node, *queue_node;
	struct sysctl_oid_list  *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char                    namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

	/* Per-TX-queue software counters, one "queueN" node each */
	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
	}

	/*
	 * Per-RX-queue counters.  NOTE(review): this loop adds nodes
	 * with the same "queueN" names as the TX loop above — presumably
	 * sysctl resolves these onto the existing nodes; verify there is
	 * no duplicate-OID warning with num_rx == num_tx.
	 */
	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
	}

	/* MAC statistics accumulated from the VF hardware registers */
	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */
1848
1849 /************************************************************************
1850  * ixv_print_debug_info
1851  *
1852  *   Called only when em_display_debug_stats is enabled.
1853  *   Provides a way to take a look at important statistics
1854  *   maintained by the driver and hardware.
1855  ************************************************************************/
1856 static void
1857 ixv_print_debug_info(struct adapter *adapter)
1858 {
1859         device_t        dev = adapter->dev;
1860         struct ixgbe_hw *hw = &adapter->hw;
1861
1862         device_printf(dev, "Error Byte Count = %u \n",
1863             IXGBE_READ_REG(hw, IXGBE_ERRBC));
1864
1865         device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1866 } /* ixv_print_debug_info */
1867
1868 /************************************************************************
1869  * ixv_sysctl_debug
1870  ************************************************************************/
1871 static int
1872 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1873 {
1874         struct adapter *adapter;
1875         int            error, result;
1876
1877         result = -1;
1878         error = sysctl_handle_int(oidp, &result, 0, req);
1879
1880         if (error || !req->newptr)
1881                 return (error);
1882
1883         if (result == 1) {
1884                 adapter = (struct adapter *)arg1;
1885                 ixv_print_debug_info(adapter);
1886         }
1887
1888         return error;
1889 } /* ixv_sysctl_debug */
1890
1891 /************************************************************************
1892  * ixv_init_device_features
1893  ************************************************************************/
1894 static void
1895 ixv_init_device_features(struct adapter *adapter)
1896 {
1897         adapter->feat_cap = IXGBE_FEATURE_NETMAP
1898                           | IXGBE_FEATURE_VF
1899                           | IXGBE_FEATURE_RSS
1900                           | IXGBE_FEATURE_LEGACY_TX;
1901
1902         /* A tad short on feature flags for VFs, atm. */
1903         switch (adapter->hw.mac.type) {
1904         case ixgbe_mac_82599_vf:
1905                 break;
1906         case ixgbe_mac_X540_vf:
1907                 break;
1908         case ixgbe_mac_X550_vf:
1909         case ixgbe_mac_X550EM_x_vf:
1910         case ixgbe_mac_X550EM_a_vf:
1911                 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1912                 break;
1913         default:
1914                 break;
1915         }
1916
1917         /* Enabled by default... */
1918         /* Is a virtual function (VF) */
1919         if (adapter->feat_cap & IXGBE_FEATURE_VF)
1920                 adapter->feat_en |= IXGBE_FEATURE_VF;
1921         /* Netmap */
1922         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1923                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1924         /* Receive-Side Scaling (RSS) */
1925         if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1926                 adapter->feat_en |= IXGBE_FEATURE_RSS;
1927         /* Needs advanced context descriptor regardless of offloads req'd */
1928         if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1929                 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1930 } /* ixv_init_device_features */
1931