]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/ixgbe/if_ixv.c
zfs: merge OpenZFS master-9305ff2ed
[FreeBSD/FreeBSD.git] / sys / dev / ixgbe / if_ixv.c
1 /******************************************************************************
2
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD$*/
34
35
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39
40 #include "ixgbe.h"
41 #include "ifdi_if.h"
42
43 #include <net/netmap.h>
44 #include <dev/netmap/netmap_kern.h>
45
46 /************************************************************************
47  * Driver version
48  ************************************************************************/
49 char ixv_driver_version[] = "2.0.1-k";
50
51 /************************************************************************
52  * PCI Device ID Table
53  *
54  *   Used by probe to select devices to load on
55  *   Last field stores an index into ixv_strings
56  *   Last entry must be all 0s
57  *
58  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
59  ************************************************************************/
60 static pci_vendor_info_t ixv_vendor_info_array[] =
61 {
62         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
63         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
64         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
65         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
66         PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
67         /* required last entry */
68 PVID_END
69 };
70
71 /************************************************************************
72  * Function prototypes
73  ************************************************************************/
74 static void     *ixv_register(device_t dev);
75 static int      ixv_if_attach_pre(if_ctx_t ctx);
76 static int      ixv_if_attach_post(if_ctx_t ctx);
77 static int      ixv_if_detach(if_ctx_t ctx);
78
79 static int      ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
80 static int      ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
81 static int      ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
82 static void     ixv_if_queues_free(if_ctx_t ctx);
83 static void     ixv_identify_hardware(if_ctx_t ctx);
84 static void     ixv_init_device_features(struct adapter *);
85 static int      ixv_allocate_pci_resources(if_ctx_t ctx);
86 static void     ixv_free_pci_resources(if_ctx_t ctx);
87 static int      ixv_setup_interface(if_ctx_t ctx);
88 static void     ixv_if_media_status(if_ctx_t , struct ifmediareq *);
89 static int      ixv_if_media_change(if_ctx_t ctx);
90 static void     ixv_if_update_admin_status(if_ctx_t ctx);
91 static int      ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
92
93 static int      ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
94 static void     ixv_if_init(if_ctx_t ctx);
95 static void     ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
96 static void     ixv_if_stop(if_ctx_t ctx);
97 static int      ixv_negotiate_api(struct adapter *);
98
99 static void     ixv_initialize_transmit_units(if_ctx_t ctx);
100 static void     ixv_initialize_receive_units(if_ctx_t ctx);
101 static void     ixv_initialize_rss_mapping(struct adapter *);
102
103 static void     ixv_setup_vlan_support(if_ctx_t ctx);
104 static void     ixv_configure_ivars(struct adapter *);
105 static void     ixv_if_enable_intr(if_ctx_t ctx);
106 static void     ixv_if_disable_intr(if_ctx_t ctx);
107 static void     ixv_if_multi_set(if_ctx_t ctx);
108
109 static void     ixv_if_register_vlan(if_ctx_t, u16);
110 static void     ixv_if_unregister_vlan(if_ctx_t, u16);
111
112 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
113 static bool     ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);
114
115 static void     ixv_save_stats(struct adapter *);
116 static void     ixv_init_stats(struct adapter *);
117 static void     ixv_update_stats(struct adapter *);
118 static void     ixv_add_stats_sysctls(struct adapter *adapter);
119
120 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
121 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
122
123 static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
124
125 /* The MSI-X Interrupt handlers */
126 static int      ixv_msix_que(void *);
127 static int      ixv_msix_mbx(void *);
128
129 /************************************************************************
130  * FreeBSD Device Interface Entry Points
131  ************************************************************************/
132 static device_method_t ixv_methods[] = {
133         /* Device interface */
134         DEVMETHOD(device_register, ixv_register),
135         DEVMETHOD(device_probe, iflib_device_probe),
136         DEVMETHOD(device_attach, iflib_device_attach),
137         DEVMETHOD(device_detach, iflib_device_detach),
138         DEVMETHOD(device_shutdown, iflib_device_shutdown),
139         DEVMETHOD_END
140 };
141
142 static driver_t ixv_driver = {
143         "ixv", ixv_methods, sizeof(struct adapter),
144 };
145
146 devclass_t ixv_devclass;
147 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
148 IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
149 MODULE_DEPEND(ixv, iflib, 1, 1, 1);
150 MODULE_DEPEND(ixv, pci, 1, 1, 1);
151 MODULE_DEPEND(ixv, ether, 1, 1, 1);
152
153 static device_method_t ixv_if_methods[] = {
154         DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
155         DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
156         DEVMETHOD(ifdi_detach, ixv_if_detach),
157         DEVMETHOD(ifdi_init, ixv_if_init),
158         DEVMETHOD(ifdi_stop, ixv_if_stop),
159         DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
160         DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
161         DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
162         DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
163         DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
164         DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
165         DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
166         DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
167         DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
168         DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
169         DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
170         DEVMETHOD(ifdi_media_status, ixv_if_media_status),
171         DEVMETHOD(ifdi_media_change, ixv_if_media_change),
172         DEVMETHOD(ifdi_timer, ixv_if_local_timer),
173         DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
174         DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
175         DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
176         DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
177         DEVMETHOD_END
178 };
179
180 static driver_t ixv_if_driver = {
181   "ixv_if", ixv_if_methods, sizeof(struct adapter)
182 };
183
184 /*
185  * TUNEABLE PARAMETERS:
186  */
187
188 /* Flow control setting, default to full */
189 static int ixv_flow_control = ixgbe_fc_full;
190 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
191
192 /*
193  * Header split: this causes the hardware to DMA
194  * the header into a separate mbuf from the payload,
195  * it can be a performance win in some workloads, but
196  * in others it actually hurts, its off by default.
197  */
198 static int ixv_header_split = FALSE;
199 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
200
201 /*
202  * Shadow VFTA table, this is needed because
203  * the real filter table gets cleared during
204  * a soft reset and we need to repopulate it.
205  */
206 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
207 extern struct if_txrx ixgbe_txrx;
208
209 static struct if_shared_ctx ixv_sctx_init = {
210         .isc_magic = IFLIB_MAGIC,
211         .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
212         .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
213         .isc_tx_maxsegsize = PAGE_SIZE,
214         .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
215         .isc_tso_maxsegsize = PAGE_SIZE,
216         .isc_rx_maxsize = MJUM16BYTES,
217         .isc_rx_nsegments = 1,
218         .isc_rx_maxsegsize = MJUM16BYTES,
219         .isc_nfl = 1,
220         .isc_ntxqs = 1,
221         .isc_nrxqs = 1,
222         .isc_admin_intrcnt = 1,
223         .isc_vendor_info = ixv_vendor_info_array,
224         .isc_driver_version = ixv_driver_version,
225         .isc_driver = &ixv_if_driver,
226         .isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,
227
228         .isc_nrxd_min = {MIN_RXD},
229         .isc_ntxd_min = {MIN_TXD},
230         .isc_nrxd_max = {MAX_RXD},
231         .isc_ntxd_max = {MAX_TXD},
232         .isc_nrxd_default = {DEFAULT_RXD},
233         .isc_ntxd_default = {DEFAULT_TXD},
234 };
235
236 static void *
237 ixv_register(device_t dev)
238 {
239         return (&ixv_sctx_init);
240 }
241
242 /************************************************************************
243  * ixv_if_tx_queues_alloc
244  ************************************************************************/
245 static int
246 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
247                        int ntxqs, int ntxqsets)
248 {
249         struct adapter     *adapter = iflib_get_softc(ctx);
250         if_softc_ctx_t     scctx = adapter->shared;
251         struct ix_tx_queue *que;
252         int                i, j, error;
253
254         MPASS(adapter->num_tx_queues == ntxqsets);
255         MPASS(ntxqs == 1);
256
257         /* Allocate queue structure memory */
258         adapter->tx_queues =
259             (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
260                                          M_DEVBUF, M_NOWAIT | M_ZERO);
261         if (!adapter->tx_queues) {
262                 device_printf(iflib_get_dev(ctx),
263                     "Unable to allocate TX ring memory\n");
264                 return (ENOMEM);
265         }
266
267         for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
268                 struct tx_ring *txr = &que->txr;
269
270                 txr->me = i;
271                 txr->adapter =  que->adapter = adapter;
272
273                 /* Allocate report status array */
274                 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
275                         error = ENOMEM;
276                         goto fail;
277                 }
278                 for (j = 0; j < scctx->isc_ntxd[0]; j++)
279                         txr->tx_rsq[j] = QIDX_INVALID;
280                 /* get the virtual and physical address of the hardware queues */
281                 txr->tail = IXGBE_VFTDT(txr->me);
282                 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
283                 txr->tx_paddr = paddrs[i*ntxqs];
284
285                 txr->bytes = 0;
286                 txr->total_packets = 0;
287
288         }
289
290         device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
291             adapter->num_tx_queues);
292
293         return (0);
294
295  fail:
296         ixv_if_queues_free(ctx);
297
298         return (error);
299 } /* ixv_if_tx_queues_alloc */
300
301 /************************************************************************
302  * ixv_if_rx_queues_alloc
303  ************************************************************************/
304 static int
305 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
306                        int nrxqs, int nrxqsets)
307 {
308         struct adapter     *adapter = iflib_get_softc(ctx);
309         struct ix_rx_queue *que;
310         int                i, error;
311
312         MPASS(adapter->num_rx_queues == nrxqsets);
313         MPASS(nrxqs == 1);
314
315         /* Allocate queue structure memory */
316         adapter->rx_queues =
317             (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
318                                          M_DEVBUF, M_NOWAIT | M_ZERO);
319         if (!adapter->rx_queues) {
320                 device_printf(iflib_get_dev(ctx),
321                     "Unable to allocate TX ring memory\n");
322                 error = ENOMEM;
323                 goto fail;
324         }
325
326         for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
327                 struct rx_ring *rxr = &que->rxr;
328                 rxr->me = i;
329                 rxr->adapter = que->adapter = adapter;
330
331
332                 /* get the virtual and physical address of the hw queues */
333                 rxr->tail = IXGBE_VFRDT(rxr->me);
334                 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
335                 rxr->rx_paddr = paddrs[i*nrxqs];
336                 rxr->bytes = 0;
337                 rxr->que = que;
338         }
339
340         device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
341             adapter->num_rx_queues);
342
343         return (0);
344
345 fail:
346         ixv_if_queues_free(ctx);
347
348         return (error);
349 } /* ixv_if_rx_queues_alloc */
350
351 /************************************************************************
352  * ixv_if_queues_free
353  ************************************************************************/
354 static void
355 ixv_if_queues_free(if_ctx_t ctx)
356 {
357         struct adapter     *adapter = iflib_get_softc(ctx);
358         struct ix_tx_queue *que = adapter->tx_queues;
359         int                i;
360
361         if (que == NULL)
362                 goto free;
363
364         for (i = 0; i < adapter->num_tx_queues; i++, que++) {
365                 struct tx_ring *txr = &que->txr;
366                 if (txr->tx_rsq == NULL)
367                         break;
368
369                 free(txr->tx_rsq, M_DEVBUF);
370                 txr->tx_rsq = NULL;
371         }
372         if (adapter->tx_queues != NULL)
373                 free(adapter->tx_queues, M_DEVBUF);
374 free:
375         if (adapter->rx_queues != NULL)
376                 free(adapter->rx_queues, M_DEVBUF);
377         adapter->tx_queues = NULL;
378         adapter->rx_queues = NULL;
379 } /* ixv_if_queues_free */
380
381 /************************************************************************
382  * ixv_if_attach_pre - Device initialization routine
383  *
384  *   Called when the driver is being loaded.
385  *   Identifies the type of hardware, allocates all resources
386  *   and initializes the hardware.
387  *
388  *   return 0 on success, positive on failure
389  ************************************************************************/
390 static int
391 ixv_if_attach_pre(if_ctx_t ctx)
392 {
393         struct adapter  *adapter;
394         device_t        dev;
395         if_softc_ctx_t  scctx;
396         struct ixgbe_hw *hw;
397         int             error = 0;
398
399         INIT_DEBUGOUT("ixv_attach: begin");
400
401         /* Allocate, clear, and link in our adapter structure */
402         dev = iflib_get_dev(ctx);
403         adapter = iflib_get_softc(ctx);
404         adapter->dev = dev;
405         adapter->ctx = ctx;
406         adapter->hw.back = adapter;
407         scctx = adapter->shared = iflib_get_softc_ctx(ctx);
408         adapter->media = iflib_get_media(ctx);
409         hw = &adapter->hw;
410
411         /* Do base PCI setup - map BAR0 */
412         if (ixv_allocate_pci_resources(ctx)) {
413                 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
414                 error = ENXIO;
415                 goto err_out;
416         }
417
418         /* SYSCTL APIs */
419         SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
420             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
421             CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
422             adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
423
424         /* Determine hardware revision */
425         ixv_identify_hardware(ctx);
426         ixv_init_device_features(adapter);
427
428         /* Initialize the shared code */
429         error = ixgbe_init_ops_vf(hw);
430         if (error) {
431                 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
432                 error = EIO;
433                 goto err_out;
434         }
435
436         /* Setup the mailbox */
437         ixgbe_init_mbx_params_vf(hw);
438
439         error = hw->mac.ops.reset_hw(hw);
440         if (error == IXGBE_ERR_RESET_FAILED)
441                 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
442         else if (error)
443                 device_printf(dev, "...reset_hw() failed with error %d\n",
444                     error);
445         if (error) {
446                 error = EIO;
447                 goto err_out;
448         }
449
450         error = hw->mac.ops.init_hw(hw);
451         if (error) {
452                 device_printf(dev, "...init_hw() failed with error %d\n",
453                     error);
454                 error = EIO;
455                 goto err_out;
456         }
457
458         /* Negotiate mailbox API version */
459         error = ixv_negotiate_api(adapter);
460         if (error) {
461                 device_printf(dev,
462                     "Mailbox API negotiation failed during attach!\n");
463                 goto err_out;
464         }
465
466         /* If no mac address was assigned, make a random one */
467         if (!ixv_check_ether_addr(hw->mac.addr)) {
468                 u8 addr[ETHER_ADDR_LEN];
469                 arc4rand(&addr, sizeof(addr), 0);
470                 addr[0] &= 0xFE;
471                 addr[0] |= 0x02;
472                 bcopy(addr, hw->mac.addr, sizeof(addr));
473                 bcopy(addr, hw->mac.perm_addr, sizeof(addr));
474         }
475
476         /* Most of the iflib initialization... */
477
478         iflib_set_mac(ctx, hw->mac.addr);
479         switch (adapter->hw.mac.type) {
480         case ixgbe_mac_X550_vf:
481         case ixgbe_mac_X550EM_x_vf:
482         case ixgbe_mac_X550EM_a_vf:
483                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
484                 break;
485         default:
486                 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
487         }
488         scctx->isc_txqsizes[0] =
489             roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
490             sizeof(u32), DBA_ALIGN);
491         scctx->isc_rxqsizes[0] =
492             roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
493             DBA_ALIGN);
494         /* XXX */
495         scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
496             CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
497         scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
498         scctx->isc_msix_bar = pci_msix_table_bar(dev);
499         scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
500         scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
501         scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
502
503         scctx->isc_txrx = &ixgbe_txrx;
504
505         /*
506          * Tell the upper layer(s) we support everything the PF
507          * driver does except...
508          *   Wake-on-LAN
509          */
510         scctx->isc_capabilities = IXGBE_CAPS;
511         scctx->isc_capabilities ^= IFCAP_WOL;
512         scctx->isc_capenable = scctx->isc_capabilities;
513
514         INIT_DEBUGOUT("ixv_if_attach_pre: end");
515
516         return (0);
517
518 err_out:
519         ixv_free_pci_resources(ctx);
520
521         return (error);
522 } /* ixv_if_attach_pre */
523
524 static int
525 ixv_if_attach_post(if_ctx_t ctx)
526 {
527         struct adapter *adapter = iflib_get_softc(ctx);
528         device_t       dev = iflib_get_dev(ctx);
529         int            error = 0;
530
531         /* Setup OS specific network interface */
532         error = ixv_setup_interface(ctx);
533         if (error) {
534                 device_printf(dev, "Interface setup failed: %d\n", error);
535                 goto end;
536         }
537
538         /* Do the stats setup */
539         ixv_save_stats(adapter);
540         ixv_init_stats(adapter);
541         ixv_add_stats_sysctls(adapter);
542
543 end:
544         return error;
545 } /* ixv_if_attach_post */
546
547 /************************************************************************
548  * ixv_detach - Device removal routine
549  *
550  *   Called when the driver is being removed.
551  *   Stops the adapter and deallocates all the resources
552  *   that were allocated for driver operation.
553  *
554  *   return 0 on success, positive on failure
555  ************************************************************************/
556 static int
557 ixv_if_detach(if_ctx_t ctx)
558 {
559         INIT_DEBUGOUT("ixv_detach: begin");
560
561         ixv_free_pci_resources(ctx);
562
563         return (0);
564 } /* ixv_if_detach */
565
566 /************************************************************************
567  * ixv_if_mtu_set
568  ************************************************************************/
569 static int
570 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
571 {
572         struct adapter *adapter = iflib_get_softc(ctx);
573         struct ifnet   *ifp = iflib_get_ifp(ctx);
574         int            error = 0;
575
576         IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
577         if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
578                 error = EINVAL;
579         } else {
580                 ifp->if_mtu = mtu;
581                 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
582         }
583
584         return error;
585 } /* ixv_if_mtu_set */
586
587 /************************************************************************
588  * ixv_if_init - Init entry point
589  *
590  *   Used in two ways: It is used by the stack as an init entry
591  *   point in network interface structure. It is also used
592  *   by the driver as a hw/sw initialization routine to get
593  *   to a consistent state.
594  *
595  *   return 0 on success, positive on failure
596  ************************************************************************/
597 static void
598 ixv_if_init(if_ctx_t ctx)
599 {
600         struct adapter  *adapter = iflib_get_softc(ctx);
601         struct ifnet    *ifp = iflib_get_ifp(ctx);
602         device_t        dev = iflib_get_dev(ctx);
603         struct ixgbe_hw *hw = &adapter->hw;
604         int             error = 0;
605
606         INIT_DEBUGOUT("ixv_if_init: begin");
607         hw->adapter_stopped = FALSE;
608         hw->mac.ops.stop_adapter(hw);
609
610         /* reprogram the RAR[0] in case user changed it. */
611         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
612
613         /* Get the latest mac address, User can use a LAA */
614         bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
615         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
616
617         /* Reset VF and renegotiate mailbox API version */
618         hw->mac.ops.reset_hw(hw);
619         hw->mac.ops.start_hw(hw);
620         error = ixv_negotiate_api(adapter);
621         if (error) {
622                 device_printf(dev,
623                     "Mailbox API negotiation failed in if_init!\n");
624                 return;
625         }
626
627         ixv_initialize_transmit_units(ctx);
628
629         /* Setup Multicast table */
630         ixv_if_multi_set(ctx);
631
632         adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
633
634         /* Configure RX settings */
635         ixv_initialize_receive_units(ctx);
636
637         /* Set up VLAN offload and filter */
638         ixv_setup_vlan_support(ctx);
639
640         /* Set up MSI-X routing */
641         ixv_configure_ivars(adapter);
642
643         /* Set up auto-mask */
644         IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
645
646         /* Set moderation on the Link interrupt */
647         IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
648
649         /* Stats init */
650         ixv_init_stats(adapter);
651
652         /* Config/Enable Link */
653         hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
654             FALSE);
655
656         /* And now turn on interrupts */
657         ixv_if_enable_intr(ctx);
658
659         return;
660 } /* ixv_if_init */
661
662 /************************************************************************
663  * ixv_enable_queue
664  ************************************************************************/
665 static inline void
666 ixv_enable_queue(struct adapter *adapter, u32 vector)
667 {
668         struct ixgbe_hw *hw = &adapter->hw;
669         u32             queue = 1 << vector;
670         u32             mask;
671
672         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
673         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
674 } /* ixv_enable_queue */
675
676 /************************************************************************
677  * ixv_disable_queue
678  ************************************************************************/
679 static inline void
680 ixv_disable_queue(struct adapter *adapter, u32 vector)
681 {
682         struct ixgbe_hw *hw = &adapter->hw;
683         u64             queue = (u64)(1 << vector);
684         u32             mask;
685
686         mask = (IXGBE_EIMS_RTX_QUEUE & queue);
687         IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
688 } /* ixv_disable_queue */
689
690
691 /************************************************************************
692  * ixv_msix_que - MSI-X Queue Interrupt Service routine
693  ************************************************************************/
694 static int
695 ixv_msix_que(void *arg)
696 {
697         struct ix_rx_queue *que = arg;
698         struct adapter     *adapter = que->adapter;
699
700         ixv_disable_queue(adapter, que->msix);
701         ++que->irqs;
702
703         return (FILTER_SCHEDULE_THREAD);
704 } /* ixv_msix_que */
705
706 /************************************************************************
707  * ixv_msix_mbx
708  ************************************************************************/
709 static int
710 ixv_msix_mbx(void *arg)
711 {
712         struct adapter  *adapter = arg;
713         struct ixgbe_hw *hw = &adapter->hw;
714         u32             reg;
715
716         ++adapter->link_irq;
717
718         /* First get the cause */
719         reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
720         /* Clear interrupt with write */
721         IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
722
723         /* Link status change */
724         if (reg & IXGBE_EICR_LSC)
725                 iflib_admin_intr_deferred(adapter->ctx);
726
727         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
728
729         return (FILTER_HANDLED);
730 } /* ixv_msix_mbx */
731
732 /************************************************************************
733  * ixv_media_status - Media Ioctl callback
734  *
735  *   Called whenever the user queries the status of
736  *   the interface using ifconfig.
737  ************************************************************************/
738 static void
739 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
740 {
741         struct adapter *adapter = iflib_get_softc(ctx);
742
743         INIT_DEBUGOUT("ixv_media_status: begin");
744
745         iflib_admin_intr_deferred(ctx);
746
747         ifmr->ifm_status = IFM_AVALID;
748         ifmr->ifm_active = IFM_ETHER;
749
750         if (!adapter->link_active)
751                 return;
752
753         ifmr->ifm_status |= IFM_ACTIVE;
754
755         switch (adapter->link_speed) {
756                 case IXGBE_LINK_SPEED_1GB_FULL:
757                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
758                         break;
759                 case IXGBE_LINK_SPEED_10GB_FULL:
760                         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
761                         break;
762                 case IXGBE_LINK_SPEED_100_FULL:
763                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
764                         break;
765                 case IXGBE_LINK_SPEED_10_FULL:
766                         ifmr->ifm_active |= IFM_10_T | IFM_FDX;
767                         break;
768         }
769 } /* ixv_if_media_status */
770
771 /************************************************************************
772  * ixv_if_media_change - Media Ioctl callback
773  *
774  *   Called when the user changes speed/duplex using
775  *   media/mediopt option with ifconfig.
776  ************************************************************************/
777 static int
778 ixv_if_media_change(if_ctx_t ctx)
779 {
780         struct adapter *adapter = iflib_get_softc(ctx);
781         struct ifmedia *ifm = iflib_get_media(ctx);
782
783         INIT_DEBUGOUT("ixv_media_change: begin");
784
785         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
786                 return (EINVAL);
787
788         switch (IFM_SUBTYPE(ifm->ifm_media)) {
789         case IFM_AUTO:
790                 break;
791         default:
792                 device_printf(adapter->dev, "Only auto media type\n");
793                 return (EINVAL);
794         }
795
796         return (0);
797 } /* ixv_if_media_change */
798
799
800 /************************************************************************
801  * ixv_negotiate_api
802  *
803  *   Negotiate the Mailbox API with the PF;
804  *   start with the most featured API first.
805  ************************************************************************/
806 static int
807 ixv_negotiate_api(struct adapter *adapter)
808 {
809         struct ixgbe_hw *hw = &adapter->hw;
810         int             mbx_api[] = { ixgbe_mbox_api_11,
811                                       ixgbe_mbox_api_10,
812                                       ixgbe_mbox_api_unknown };
813         int             i = 0;
814
815         while (mbx_api[i] != ixgbe_mbox_api_unknown) {
816                 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
817                         return (0);
818                 i++;
819         }
820
821         return (EINVAL);
822 } /* ixv_negotiate_api */
823
824
825 /************************************************************************
826  * ixv_if_multi_set - Multicast Update
827  *
828  *   Called whenever multicast address list is updated.
829  ************************************************************************/
830 static void
831 ixv_if_multi_set(if_ctx_t ctx)
832 {
833         u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
834         struct adapter     *adapter = iflib_get_softc(ctx);
835         u8                 *update_ptr;
836         struct ifmultiaddr *ifma;
837         if_t               ifp = iflib_get_ifp(ctx);
838         int                mcnt = 0;
839
840         IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
841
842         CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
843                 if (ifma->ifma_addr->sa_family != AF_LINK)
844                         continue;
845                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
846                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
847                     IXGBE_ETH_LENGTH_OF_ADDRESS);
848                 mcnt++;
849         }
850
851         update_ptr = mta;
852
853         adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
854             ixv_mc_array_itr, TRUE);
855 } /* ixv_if_multi_set */
856
857 /************************************************************************
858  * ixv_mc_array_itr
859  *
860  *   An iterator function needed by the multicast shared code.
861  *   It feeds the shared code routine the addresses in the
862  *   array of ixv_set_multi() one by one.
863  ************************************************************************/
864 static u8 *
865 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
866 {
867         u8 *addr = *update_ptr;
868         u8 *newptr;
869
870         *vmdq = 0;
871
872         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
873         *update_ptr = newptr;
874
875         return addr;
876 } /* ixv_mc_array_itr */
877
878 /************************************************************************
879  * ixv_if_local_timer - Timer routine
880  *
881  *   Checks for link status, updates statistics,
882  *   and runs the watchdog check.
883  ************************************************************************/
884 static void
885 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
886 {
887         if (qid != 0)
888                 return;
889
890         /* Fire off the adminq task */
891         iflib_admin_intr_deferred(ctx);
892 } /* ixv_if_local_timer */
893
894 /************************************************************************
895  * ixv_if_update_admin_status - Update OS on link state
896  *
897  * Note: Only updates the OS on the cached link state.
898  *       The real check of the hardware only happens with
899  *       a link interrupt.
900  ************************************************************************/
901 static void
902 ixv_if_update_admin_status(if_ctx_t ctx)
903 {
904         struct adapter *adapter = iflib_get_softc(ctx);
905         device_t       dev = iflib_get_dev(ctx);
906         s32            status;
907
908         adapter->hw.mac.get_link_status = TRUE;
909
910         status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
911             &adapter->link_up, FALSE);
912
913         if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
914                 /* Mailbox's Clear To Send status is lost or timeout occurred.
915                  * We need reinitialization. */
916                 iflib_get_ifp(ctx)->if_init(ctx);
917         }
918
919         if (adapter->link_up) {
920                 if (adapter->link_active == FALSE) {
921                         if (bootverbose)
922                                 device_printf(dev, "Link is up %d Gbps %s \n",
923                                     ((adapter->link_speed == 128) ? 10 : 1),
924                                     "Full Duplex");
925                         adapter->link_active = TRUE;
926                         iflib_link_state_change(ctx, LINK_STATE_UP,
927                             IF_Gbps(10));
928                 }
929         } else { /* Link down */
930                 if (adapter->link_active == TRUE) {
931                         if (bootverbose)
932                                 device_printf(dev, "Link is Down\n");
933                         iflib_link_state_change(ctx, LINK_STATE_DOWN,  0);
934                         adapter->link_active = FALSE;
935                 }
936         }
937
938         /* Stats Update */
939         ixv_update_stats(adapter);
940 } /* ixv_if_update_admin_status */
941
942
943 /************************************************************************
944  * ixv_if_stop - Stop the hardware
945  *
946  *   Disables all traffic on the adapter by issuing a
947  *   global reset on the MAC and deallocates TX/RX buffers.
948  ************************************************************************/
949 static void
950 ixv_if_stop(if_ctx_t ctx)
951 {
952         struct adapter  *adapter = iflib_get_softc(ctx);
953         struct ixgbe_hw *hw = &adapter->hw;
954
955         INIT_DEBUGOUT("ixv_stop: begin\n");
956
957         ixv_if_disable_intr(ctx);
958
959         hw->mac.ops.reset_hw(hw);
960         adapter->hw.adapter_stopped = FALSE;
961         hw->mac.ops.stop_adapter(hw);
962
963         /* Update the stack */
964         adapter->link_up = FALSE;
965         ixv_if_update_admin_status(ctx);
966
967         /* reprogram the RAR[0] in case user changed it. */
968         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
969 } /* ixv_if_stop */
970
971
972 /************************************************************************
973  * ixv_identify_hardware - Determine hardware revision.
974  ************************************************************************/
975 static void
976 ixv_identify_hardware(if_ctx_t ctx)
977 {
978         struct adapter  *adapter = iflib_get_softc(ctx);
979         device_t        dev = iflib_get_dev(ctx);
980         struct ixgbe_hw *hw = &adapter->hw;
981
982         /* Save off the information about this board */
983         hw->vendor_id = pci_get_vendor(dev);
984         hw->device_id = pci_get_device(dev);
985         hw->revision_id = pci_get_revid(dev);
986         hw->subsystem_vendor_id = pci_get_subvendor(dev);
987         hw->subsystem_device_id = pci_get_subdevice(dev);
988
989         /* A subset of set_mac_type */
990         switch (hw->device_id) {
991         case IXGBE_DEV_ID_82599_VF:
992                 hw->mac.type = ixgbe_mac_82599_vf;
993                 break;
994         case IXGBE_DEV_ID_X540_VF:
995                 hw->mac.type = ixgbe_mac_X540_vf;
996                 break;
997         case IXGBE_DEV_ID_X550_VF:
998                 hw->mac.type = ixgbe_mac_X550_vf;
999                 break;
1000         case IXGBE_DEV_ID_X550EM_X_VF:
1001                 hw->mac.type = ixgbe_mac_X550EM_x_vf;
1002                 break;
1003         case IXGBE_DEV_ID_X550EM_A_VF:
1004                 hw->mac.type = ixgbe_mac_X550EM_a_vf;
1005                 break;
1006         default:
1007                 device_printf(dev, "unknown mac type\n");
1008                 hw->mac.type = ixgbe_mac_unknown;
1009                 break;
1010         }
1011 } /* ixv_identify_hardware */
1012
1013 /************************************************************************
1014  * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1015  ************************************************************************/
1016 static int
1017 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1018 {
1019         struct adapter     *adapter = iflib_get_softc(ctx);
1020         device_t           dev = iflib_get_dev(ctx);
1021         struct ix_rx_queue *rx_que = adapter->rx_queues;
1022         struct ix_tx_queue *tx_que;
1023         int                error, rid, vector = 0;
1024         char               buf[16];
1025
1026         for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1027                 rid = vector + 1;
1028
1029                 snprintf(buf, sizeof(buf), "rxq%d", i);
1030                 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1031                     IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1032
1033                 if (error) {
1034                         device_printf(iflib_get_dev(ctx),
1035                             "Failed to allocate que int %d err: %d", i, error);
1036                         adapter->num_rx_queues = i + 1;
1037                         goto fail;
1038                 }
1039
1040                 rx_que->msix = vector;
1041         }
1042
1043         for (int i = 0; i < adapter->num_tx_queues; i++) {
1044                 snprintf(buf, sizeof(buf), "txq%d", i);
1045                 tx_que = &adapter->tx_queues[i];
1046                 tx_que->msix = i % adapter->num_rx_queues;
1047                 iflib_softirq_alloc_generic(ctx,
1048                     &adapter->rx_queues[tx_que->msix].que_irq,
1049                     IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1050         }
1051         rid = vector + 1;
1052         error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
1053             IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
1054         if (error) {
1055                 device_printf(iflib_get_dev(ctx),
1056                     "Failed to register admin handler");
1057                 return (error);
1058         }
1059
1060         adapter->vector = vector;
1061         /*
1062          * Due to a broken design QEMU will fail to properly
1063          * enable the guest for MSIX unless the vectors in
1064          * the table are all set up, so we must rewrite the
1065          * ENABLE in the MSIX control register again at this
1066          * point to cause it to successfully initialize us.
1067          */
1068         if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1069                 int msix_ctrl;
1070                 pci_find_cap(dev, PCIY_MSIX, &rid);
1071                 rid += PCIR_MSIX_CTRL;
1072                 msix_ctrl = pci_read_config(dev, rid, 2);
1073                 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1074                 pci_write_config(dev, rid, msix_ctrl, 2);
1075         }
1076
1077         return (0);
1078
1079 fail:
1080         iflib_irq_free(ctx, &adapter->irq);
1081         rx_que = adapter->rx_queues;
1082         for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
1083                 iflib_irq_free(ctx, &rx_que->que_irq);
1084
1085         return (error);
1086 } /* ixv_if_msix_intr_assign */
1087
1088 /************************************************************************
1089  * ixv_allocate_pci_resources
1090  ************************************************************************/
1091 static int
1092 ixv_allocate_pci_resources(if_ctx_t ctx)
1093 {
1094         struct adapter *adapter = iflib_get_softc(ctx);
1095         device_t       dev = iflib_get_dev(ctx);
1096         int            rid;
1097
1098         rid = PCIR_BAR(0);
1099         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1100             RF_ACTIVE);
1101
1102         if (!(adapter->pci_mem)) {
1103                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1104                 return (ENXIO);
1105         }
1106
1107         adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1108         adapter->osdep.mem_bus_space_handle =
1109             rman_get_bushandle(adapter->pci_mem);
1110         adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1111
1112         return (0);
1113 } /* ixv_allocate_pci_resources */
1114
1115 /************************************************************************
1116  * ixv_free_pci_resources
1117  ************************************************************************/
1118 static void
1119 ixv_free_pci_resources(if_ctx_t ctx)
1120 {
1121         struct adapter     *adapter = iflib_get_softc(ctx);
1122         struct ix_rx_queue *que = adapter->rx_queues;
1123         device_t           dev = iflib_get_dev(ctx);
1124
1125         /* Release all MSI-X queue resources */
1126         if (adapter->intr_type == IFLIB_INTR_MSIX)
1127                 iflib_irq_free(ctx, &adapter->irq);
1128
1129         if (que != NULL) {
1130                 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1131                         iflib_irq_free(ctx, &que->que_irq);
1132                 }
1133         }
1134
1135         if (adapter->pci_mem != NULL)
1136                 bus_release_resource(dev, SYS_RES_MEMORY,
1137                     rman_get_rid(adapter->pci_mem), adapter->pci_mem);
1138 } /* ixv_free_pci_resources */
1139
1140 /************************************************************************
1141  * ixv_setup_interface
1142  *
1143  *   Setup networking device structure and register an interface.
1144  ************************************************************************/
1145 static int
1146 ixv_setup_interface(if_ctx_t ctx)
1147 {
1148         struct adapter *adapter = iflib_get_softc(ctx);
1149         if_softc_ctx_t scctx = adapter->shared;
1150         struct ifnet   *ifp = iflib_get_ifp(ctx);
1151
1152         INIT_DEBUGOUT("ixv_setup_interface: begin");
1153
1154         if_setbaudrate(ifp, IF_Gbps(10));
1155         ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1156
1157
1158         adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1159         ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1160         ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1161
1162         return 0;
1163 } /* ixv_setup_interface */
1164
1165 /************************************************************************
1166  * ixv_if_get_counter
1167  ************************************************************************/
1168 static uint64_t
1169 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1170 {
1171         struct adapter *adapter = iflib_get_softc(ctx);
1172         if_t           ifp = iflib_get_ifp(ctx);
1173
1174         switch (cnt) {
1175         case IFCOUNTER_IPACKETS:
1176                 return (adapter->ipackets);
1177         case IFCOUNTER_OPACKETS:
1178                 return (adapter->opackets);
1179         case IFCOUNTER_IBYTES:
1180                 return (adapter->ibytes);
1181         case IFCOUNTER_OBYTES:
1182                 return (adapter->obytes);
1183         case IFCOUNTER_IMCASTS:
1184                 return (adapter->imcasts);
1185         default:
1186                 return (if_get_counter_default(ifp, cnt));
1187         }
1188 } /* ixv_if_get_counter */
1189
1190 /* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1191  * @ctx: iflib context
1192  * @event: event code to check
1193  *
1194  * Defaults to returning true for every event.
1195  *
1196  * @returns true if iflib needs to reinit the interface
1197  */
1198 static bool
1199 ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1200 {
1201         switch (event) {
1202         case IFLIB_RESTART_VLAN_CONFIG:
1203                 /* XXX: This may not need to return true */
1204         default:
1205                 return (true);
1206         }
1207 }
1208
1209 /************************************************************************
1210  * ixv_initialize_transmit_units - Enable transmit unit.
1211  ************************************************************************/
1212 static void
1213 ixv_initialize_transmit_units(if_ctx_t ctx)
1214 {
1215         struct adapter     *adapter = iflib_get_softc(ctx);
1216         struct ixgbe_hw    *hw = &adapter->hw;
1217         if_softc_ctx_t     scctx = adapter->shared;
1218         struct ix_tx_queue *que = adapter->tx_queues;
1219         int                i;
1220
1221         for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1222                 struct tx_ring *txr = &que->txr;
1223                 u64            tdba = txr->tx_paddr;
1224                 u32            txctrl, txdctl;
1225                 int            j = txr->me;
1226
1227                 /* Set WTHRESH to 8, burst writeback */
1228                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1229                 txdctl |= (8 << 16);
1230                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1231
1232                 /* Set the HW Tx Head and Tail indices */
1233                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1234                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1235
1236                 /* Set Tx Tail register */
1237                 txr->tail = IXGBE_VFTDT(j);
1238
1239                 txr->tx_rs_cidx = txr->tx_rs_pidx;
1240                 /* Initialize the last processed descriptor to be the end of
1241                  * the ring, rather than the start, so that we avoid an
1242                  * off-by-one error when calculating how many descriptors are
1243                  * done in the credits_update function.
1244                  */
1245                 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1246                 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1247                         txr->tx_rsq[k] = QIDX_INVALID;
1248
1249                 /* Set Ring parameters */
1250                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1251                     (tdba & 0x00000000ffffffffULL));
1252                 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1253                 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1254                     scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
1255                 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1256                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1257                 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1258
1259                 /* Now enable */
1260                 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1261                 txdctl |= IXGBE_TXDCTL_ENABLE;
1262                 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1263         }
1264
1265         return;
1266 } /* ixv_initialize_transmit_units */
1267
1268 /************************************************************************
1269  * ixv_initialize_rss_mapping
1270  ************************************************************************/
1271 static void
1272 ixv_initialize_rss_mapping(struct adapter *adapter)
1273 {
1274         struct ixgbe_hw *hw = &adapter->hw;
1275         u32             reta = 0, mrqc, rss_key[10];
1276         int             queue_id;
1277         int             i, j;
1278         u32             rss_hash_config;
1279
1280         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1281                 /* Fetch the configured RSS key */
1282                 rss_getkey((uint8_t *)&rss_key);
1283         } else {
1284                 /* set up random bits */
1285                 arc4rand(&rss_key, sizeof(rss_key), 0);
1286         }
1287
1288         /* Now fill out hash function seeds */
1289         for (i = 0; i < 10; i++)
1290                 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1291
1292         /* Set up the redirection table */
1293         for (i = 0, j = 0; i < 64; i++, j++) {
1294                 if (j == adapter->num_rx_queues)
1295                         j = 0;
1296
1297                 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1298                         /*
1299                          * Fetch the RSS bucket id for the given indirection
1300                          * entry. Cap it at the number of configured buckets
1301                          * (which is num_rx_queues.)
1302                          */
1303                         queue_id = rss_get_indirection_to_bucket(i);
1304                         queue_id = queue_id % adapter->num_rx_queues;
1305                 } else
1306                         queue_id = j;
1307
1308                 /*
1309                  * The low 8 bits are for hash value (n+0);
1310                  * The next 8 bits are for hash value (n+1), etc.
1311                  */
1312                 reta >>= 8;
1313                 reta |= ((uint32_t)queue_id) << 24;
1314                 if ((i & 3) == 3) {
1315                         IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1316                         reta = 0;
1317                 }
1318         }
1319
1320         /* Perform hash on these packet types */
1321         if (adapter->feat_en & IXGBE_FEATURE_RSS)
1322                 rss_hash_config = rss_gethashconfig();
1323         else {
1324                 /*
1325                  * Disable UDP - IP fragments aren't currently being handled
1326                  * and so we end up with a mix of 2-tuple and 4-tuple
1327                  * traffic.
1328                  */
1329                 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1330                                 | RSS_HASHTYPE_RSS_TCP_IPV4
1331                                 | RSS_HASHTYPE_RSS_IPV6
1332                                 | RSS_HASHTYPE_RSS_TCP_IPV6;
1333         }
1334
1335         mrqc = IXGBE_MRQC_RSSEN;
1336         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1337                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1338         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1339                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1340         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1341                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1342         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1343                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1344         if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1345                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1346                     __func__);
1347         if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1348                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1349                     __func__);
1350         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1351                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1352         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1353                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1354         if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1355                 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1356                     __func__);
1357         IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1358 } /* ixv_initialize_rss_mapping */
1359
1360
1361 /************************************************************************
1362  * ixv_initialize_receive_units - Setup receive registers and features.
1363  ************************************************************************/
1364 static void
1365 ixv_initialize_receive_units(if_ctx_t ctx)
1366 {
1367         struct adapter     *adapter = iflib_get_softc(ctx);
1368         if_softc_ctx_t     scctx;
1369         struct ixgbe_hw    *hw = &adapter->hw;
1370         struct ifnet       *ifp = iflib_get_ifp(ctx);
1371         struct ix_rx_queue *que = adapter->rx_queues;
1372         u32                bufsz, psrtype;
1373
1374         if (ifp->if_mtu > ETHERMTU)
1375                 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1376         else
1377                 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1378
1379         psrtype = IXGBE_PSRTYPE_TCPHDR
1380                 | IXGBE_PSRTYPE_UDPHDR
1381                 | IXGBE_PSRTYPE_IPV4HDR
1382                 | IXGBE_PSRTYPE_IPV6HDR
1383                 | IXGBE_PSRTYPE_L2HDR;
1384
1385         if (adapter->num_rx_queues > 1)
1386                 psrtype |= 1 << 29;
1387
1388         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1389
1390         /* Tell PF our max_frame size */
1391         if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1392                 device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
1393         }
1394         scctx = adapter->shared;
1395
1396         for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1397                 struct rx_ring *rxr = &que->rxr;
1398                 u64            rdba = rxr->rx_paddr;
1399                 u32            reg, rxdctl;
1400                 int            j = rxr->me;
1401
1402                 /* Disable the queue */
1403                 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1404                 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1405                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
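                /* Poll up to 10 ms for the hardware to acknowledge the disable. */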
1406                 for (int k = 0; k < 10; k++) {
1407                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1408                             IXGBE_RXDCTL_ENABLE)
1409                                 msec_delay(1);
1410                         else
1411                                 break;
1412                 }
1413                 wmb();
1414                 /* Setup the Base and Length of the Rx Descriptor Ring */
1415                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1416                     (rdba & 0x00000000ffffffffULL));
1417                 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1418                 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1419                     scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1420
1421                 /* Reset the ring indices */
1422                 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1423                 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1424
1425                 /* Set up the SRRCTL register */
1426                 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1427                 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1428                 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1429                 reg |= bufsz;
1430                 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1431                 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1432
1433                 /* Capture Rx Tail index */
1434                 rxr->tail = IXGBE_VFRDT(rxr->me);
1435
1436                 /* Do the queue enabling last */
1437                 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1438                 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
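                /* Likewise, give the hardware up to 10 ms to report the queue as enabled. */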
1439                 for (int l = 0; l < 10; l++) {
1440                         if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1441                             IXGBE_RXDCTL_ENABLE)
1442                                 break;
1443                         msec_delay(1);
1444                 }
1445                 wmb();
1446
1447                 /* Set the Tail Pointer */
1448 #ifdef DEV_NETMAP
1449                 /*
1450                  * In netmap mode, we must preserve the buffers made
1451                  * available to userspace before the if_init()
1452                  * (this is true by default on the TX side, because
1453                  * init makes all buffers available to userspace).
1454                  *
1455                  * netmap_reset() and the device specific routines
1456                  * (e.g. ixgbe_setup_receive_rings()) map these
1457                  * buffers at the end of the NIC ring, so here we
1458                  * must set the RDT (tail) register to make sure
1459                  * they are not overwritten.
1460                  *
1461                  * In this driver the NIC ring starts at RDH = 0,
1462                  * RDT points to the last slot available for reception (?),
1463                  * so RDT = num_rx_desc - 1 means the whole ring is available.
1464                  */
1465                 if (ifp->if_capenable & IFCAP_NETMAP) {
1466                         struct netmap_adapter *na = NA(ifp);
1467                         struct netmap_kring *kring = na->rx_rings[j];
1468                         int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1469
1470                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1471                 } else
1472 #endif /* DEV_NETMAP */
1473                         IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1474                             scctx->isc_nrxd[0] - 1);
1475         }
1476
1477         /*
1478          * Do not touch RSS and RETA settings for older hardware
1479          * as those are shared between the PF and all VFs.
1480          */
1481         if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
1482                 ixv_initialize_rss_mapping(adapter);
1483 } /* ixv_initialize_receive_units */
1484
1485 /************************************************************************
1486  * ixv_setup_vlan_support
1487  ************************************************************************/
1488 static void
1489 ixv_setup_vlan_support(if_ctx_t ctx)
1490 {
1491         struct ifnet    *ifp = iflib_get_ifp(ctx);
1492         struct adapter  *adapter = iflib_get_softc(ctx);
1493         struct ixgbe_hw *hw = &adapter->hw;
1494         u32             ctrl, vid, vfta, retry;
1495
1496         /*
1497          * We get here through if_init, meaning a soft
1498          * reset; this has already cleared the VFTA and
1499          * other state, so if no VLANs have been
1500          * registered, there is nothing to do.
1501          */
1502         if (adapter->num_vlans == 0)
1503                 return;
1504
1505         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1506                 /* Enable the queues */
1507                 for (int i = 0; i < adapter->num_rx_queues; i++) {
1508                         ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1509                         ctrl |= IXGBE_RXDCTL_VME;
1510                         IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1511                         /*
1512                          * Let Rx path know that it needs to store VLAN tag
1513                          * as part of extra mbuf info.
1514                          */
1515                         adapter->rx_queues[i].rxr.vtag_strip = TRUE;
1516                 }
1517         }
1518
1519         /*
1520          * If filtering VLAN tags is disabled,
1521          * there is no need to fill VLAN Filter Table Array (VFTA).
1522          */
1523         if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1524                 return;
1525
1526         /*
1527          * A soft reset zeroes out the VFTA, so
1528          * we need to repopulate it now.
1529          */
1530         for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1531                 if (ixv_shadow_vfta[i] == 0)
1532                         continue;
1533                 vfta = ixv_shadow_vfta[i];
1534                 /*
1535                  * Reconstruct the VLAN IDs from the
1536                  * bits set in each of the 32-bit
1537                  * words of the shadow array.
1538                  */
1539                 for (int j = 0; j < 32; j++) {
1540                         retry = 0;
1541                         if ((vfta & (1 << j)) == 0)
1542                                 continue;
1543                         vid = (i * 32) + j;
1544                         /* Call the shared code mailbox routine */
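                        /* The PF mailbox may be busy; retry a few times before giving up on this VID. */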
1545                         while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1546                                 if (++retry > 5)
1547                                         break;
1548                         }
1549                 }
1550         }
1551 } /* ixv_setup_vlan_support */
1552
1553 /************************************************************************
1554  * ixv_if_register_vlan
1555  *
1556  *   Run via a vlan config EVENT, it enables us to use the
1557  *   HW Filter table since we can get the vlan id. This just
1558  *   creates the entry in the soft version of the VFTA; init
1559  *   will repopulate the real table.
1560  ************************************************************************/
1561 static void
1562 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1563 {
1564         struct adapter *adapter = iflib_get_softc(ctx);
1565         u16            index, bit;
1566
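        /*
         * The shadow VFTA is an array of 32-bit words: bits 11:5 of the
         * VLAN ID select the word and bits 4:0 select the bit within it
         * (e.g. VLAN 100 lands in word 3, bit 4).
         */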
1567         index = (vtag >> 5) & 0x7F;
1568         bit = vtag & 0x1F;
1569         ixv_shadow_vfta[index] |= (1 << bit);
1570         ++adapter->num_vlans;
1571 } /* ixv_if_register_vlan */
1572
1573 /************************************************************************
1574  * ixv_if_unregister_vlan
1575  *
1576  *   Run via a vlan unconfig EVENT, remove our entry
1577  *   in the soft vfta.
1578  ************************************************************************/
1579 static void
1580 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1581 {
1582         struct adapter *adapter = iflib_get_softc(ctx);
1583         u16            index, bit;
1584
1585         index = (vtag >> 5) & 0x7F;
1586         bit = vtag & 0x1F;
1587         ixv_shadow_vfta[index] &= ~(1 << bit);
1588         --adapter->num_vlans;
1589 } /* ixv_if_unregister_vlan */
1590
1591 /************************************************************************
1592  * ixv_if_enable_intr
1593  ************************************************************************/
1594 static void
1595 ixv_if_enable_intr(if_ctx_t ctx)
1596 {
1597         struct adapter  *adapter = iflib_get_softc(ctx);
1598         struct ixgbe_hw *hw = &adapter->hw;
1599         struct ix_rx_queue *que = adapter->rx_queues;
1600         u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1601
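        /*
         * Unmask the non-queue (mailbox/other) causes first; the queue
         * vectors are enabled individually below.  VTEIAC is presumably
         * the auto-clear mask, limited here to the queue interrupts.
         */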
1602         IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1603
1604         mask = IXGBE_EIMS_ENABLE_MASK;
1605         mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1606         IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1607
1608         for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1609                 ixv_enable_queue(adapter, que->msix);
1610
1611         IXGBE_WRITE_FLUSH(hw);
1612 } /* ixv_if_enable_intr */
1613
1614 /************************************************************************
1615  * ixv_if_disable_intr
1616  ************************************************************************/
1617 static void
1618 ixv_if_disable_intr(if_ctx_t ctx)
1619 {
1620         struct adapter *adapter = iflib_get_softc(ctx);
1621         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1622         IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1623         IXGBE_WRITE_FLUSH(&adapter->hw);
1624 } /* ixv_if_disable_intr */
1625
1626 /************************************************************************
1627  * ixv_if_rx_queue_intr_enable
1628  ************************************************************************/
1629 static int
1630 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1631 {
1632         struct adapter  *adapter = iflib_get_softc(ctx);
1633         struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1634
1635         ixv_enable_queue(adapter, que->rxr.me);
1636
1637         return (0);
1638 } /* ixv_if_rx_queue_intr_enable */
1639
1640 /************************************************************************
1641  * ixv_set_ivar
1642  *
1643  *   Setup the correct IVAR register for a particular MSI-X interrupt
1644  *    - entry is the register array entry
1645  *    - vector is the MSI-X vector for this queue
1646  *    - type is RX/TX/MISC
1647  ************************************************************************/
1648 static void
1649 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1650 {
1651         struct ixgbe_hw *hw = &adapter->hw;
1652         u32             ivar, index;
1653
1654         vector |= IXGBE_IVAR_ALLOC_VAL;
1655
1656         if (type == -1) { /* MISC IVAR */
1657                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1658                 ivar &= ~0xFF;
1659                 ivar |= vector;
1660                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1661         } else {          /* RX/TX IVARS */
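                /*
                 * Each VTIVAR register appears to pack four 8-bit entries:
                 * RX and TX vectors for an even/odd queue pair.  entry >> 1
                 * picks the register, (entry & 1) the half, and type
                 * (0 = RX, 1 = TX) the byte within that half.
                 */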
1662                 index = (16 * (entry & 1)) + (8 * type);
1663                 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1664                 ivar &= ~(0xFF << index);
1665                 ivar |= (vector << index);
1666                 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1667         }
1668 } /* ixv_set_ivar */
1669
1670 /************************************************************************
1671  * ixv_configure_ivars
1672  ************************************************************************/
1673 static void
1674 ixv_configure_ivars(struct adapter *adapter)
1675 {
1676         struct ix_rx_queue *que = adapter->rx_queues;
1677
1678         MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1679
1680         for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1681                 /* First the RX queue entry */
1682                 ixv_set_ivar(adapter, i, que->msix, 0);
1683                 /* ... and the TX */
1684                 ixv_set_ivar(adapter, i, que->msix, 1);
1685                 /* Set an initial value in EITR */
1686                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1687                     IXGBE_EITR_DEFAULT);
1688         }
1689
1690         /* For the mailbox interrupt */
1691         ixv_set_ivar(adapter, 1, adapter->vector, -1);
1692 } /* ixv_configure_ivars */
1693
1694 /************************************************************************
1695  * ixv_save_stats
1696  *
1697  *   The VF statistics registers never start from a
1698  *   truly clean state, so this routine establishes
1699  *   an artificial baseline, marking ground zero
1700  *   on attach.
1701  ************************************************************************/
1702 static void
1703 ixv_save_stats(struct adapter *adapter)
1704 {
1705         if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1706                 adapter->stats.vf.saved_reset_vfgprc +=
1707                     adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1708                 adapter->stats.vf.saved_reset_vfgptc +=
1709                     adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1710                 adapter->stats.vf.saved_reset_vfgorc +=
1711                     adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1712                 adapter->stats.vf.saved_reset_vfgotc +=
1713                     adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1714                 adapter->stats.vf.saved_reset_vfmprc +=
1715                     adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1716         }
1717 } /* ixv_save_stats */
1718
1719 /************************************************************************
1720  * ixv_init_stats
1721  ************************************************************************/
1722 static void
1723 ixv_init_stats(struct adapter *adapter)
1724 {
1725         struct ixgbe_hw *hw = &adapter->hw;
1726
1727         adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1728         adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1729         adapter->stats.vf.last_vfgorc |=
1730             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1731
1732         adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1733         adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1734         adapter->stats.vf.last_vfgotc |=
1735             (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1736
1737         adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1738
1739         adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1740         adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1741         adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1742         adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1743         adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1744 } /* ixv_init_stats */
1745
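/*
 * The VF hardware counters are free-running 32-bit (or 36-bit, split across
 * an LSB/MSB register pair) values that wrap; these macros fold the current
 * reading into a 64-bit software counter, adding one full counter period
 * whenever a wrap is detected.
 */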
1746 #define UPDATE_STAT_32(reg, last, count)                \
1747 {                                                       \
1748         u32 current = IXGBE_READ_REG(hw, reg);          \
1749         if (current < last)                             \
1750                 count += 0x100000000LL;                 \
1751         last = current;                                 \
1752         count &= 0xFFFFFFFF00000000LL;                  \
1753         count |= current;                               \
1754 }
1755
1756 #define UPDATE_STAT_36(lsb, msb, last, count)           \
1757 {                                                       \
1758         u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
1759         u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
1760         u64 current = ((cur_msb << 32) | cur_lsb);      \
1761         if (current < last)                             \
1762                 count += 0x1000000000LL;                \
1763         last = current;                                 \
1764         count &= 0xFFFFFFF000000000LL;                  \
1765         count |= current;                               \
1766 }
1767
1768 /************************************************************************
1769  * ixv_update_stats - Update the board statistics counters.
1770  ************************************************************************/
1771 void
1772 ixv_update_stats(struct adapter *adapter)
1773 {
1774         struct ixgbe_hw *hw = &adapter->hw;
1775         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1776
1777         UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1778             adapter->stats.vf.vfgprc);
1779         UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1780             adapter->stats.vf.vfgptc);
1781         UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1782             adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1783         UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1784             adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1785         UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1786             adapter->stats.vf.vfmprc);
1787
1788         /* Fill out the OS statistics structure */
1789         IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1790         IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1791         IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1792         IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1793         IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1794 } /* ixv_update_stats */
1795
1796 /************************************************************************
1797  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1798  ************************************************************************/
1799 static void
1800 ixv_add_stats_sysctls(struct adapter *adapter)
1801 {
1802         device_t                dev = adapter->dev;
1803         struct ix_tx_queue      *tx_que = adapter->tx_queues;
1804         struct ix_rx_queue      *rx_que = adapter->rx_queues;
1805         struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1806         struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1807         struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1808         struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1809         struct sysctl_oid       *stat_node, *queue_node;
1810         struct sysctl_oid_list  *stat_list, *queue_list;
1811
1812 #define QUEUE_NAME_LEN 32
1813         char                    namebuf[QUEUE_NAME_LEN];
1814
1815         /* Driver Statistics */
1816         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1817             CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1818         SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1819             CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1820
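        /*
         * Per-queue statistics hang off the device's sysctl tree, so they
         * typically show up as dev.ixv.<unit>.queue<N>.<stat>.
         */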
1821         for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1822                 struct tx_ring *txr = &tx_que->txr;
1823                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1824                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1825                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1826                 queue_list = SYSCTL_CHILDREN(queue_node);
1827
1828                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1829                     CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1830                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1831                     CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1832         }
1833
1834         for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1835                 struct rx_ring *rxr = &rx_que->rxr;
1836                 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1837                 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1838                     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1839                 queue_list = SYSCTL_CHILDREN(queue_node);
1840
1841                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1842                     CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1843                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1844                     CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1845                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1846                     CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1847                 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1848                     CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1849         }
1850
1851         stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1852             CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1853             "VF Statistics (read from HW registers)");
1854         stat_list = SYSCTL_CHILDREN(stat_node);
1855
1856         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1857             CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1858         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1859             CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1860         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1861             CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1862         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1863             CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1864         SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1865             CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1866 } /* ixv_add_stats_sysctls */
1867
1868 /************************************************************************
1869  * ixv_print_debug_info
1870  *
1871  *   Called from the ixv debug sysctl handler.
1872  *   Provides a way to take a look at important statistics
1873  *   maintained by the driver and hardware.
1874  ************************************************************************/
1875 static void
1876 ixv_print_debug_info(struct adapter *adapter)
1877 {
1878         device_t        dev = adapter->dev;
1879         struct ixgbe_hw *hw = &adapter->hw;
1880
1881         device_printf(dev, "Error Byte Count = %u \n",
1882             IXGBE_READ_REG(hw, IXGBE_ERRBC));
1883
1884         device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1885 } /* ixv_print_debug_info */
1886
1887 /************************************************************************
1888  * ixv_sysctl_debug
1889  ************************************************************************/
1890 static int
1891 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1892 {
1893         struct adapter *adapter;
1894         int            error, result;
1895
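        /* Writing 1 to this node triggers a one-shot debug dump; reads simply return -1. */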
1896         result = -1;
1897         error = sysctl_handle_int(oidp, &result, 0, req);
1898
1899         if (error || !req->newptr)
1900                 return (error);
1901
1902         if (result == 1) {
1903                 adapter = (struct adapter *)arg1;
1904                 ixv_print_debug_info(adapter);
1905         }
1906
1907         return (error);
1908 } /* ixv_sysctl_debug */
1909
1910 /************************************************************************
1911  * ixv_init_device_features
1912  ************************************************************************/
1913 static void
1914 ixv_init_device_features(struct adapter *adapter)
1915 {
1916         adapter->feat_cap = IXGBE_FEATURE_NETMAP
1917                           | IXGBE_FEATURE_VF
1918                           | IXGBE_FEATURE_LEGACY_TX;
1919
1920         /* Feature flags are still rather sparse for VFs at the moment. */
1921         switch (adapter->hw.mac.type) {
1922         case ixgbe_mac_82599_vf:
1923                 break;
1924         case ixgbe_mac_X540_vf:
1925                 break;
1926         case ixgbe_mac_X550_vf:
1927         case ixgbe_mac_X550EM_x_vf:
1928         case ixgbe_mac_X550EM_a_vf:
1929                 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1930                 adapter->feat_cap |= IXGBE_FEATURE_RSS;
1931                 break;
1932         default:
1933                 break;
1934         }
1935
1936         /* Enabled by default... */
1937         /* Is a virtual function (VF) */
1938         if (adapter->feat_cap & IXGBE_FEATURE_VF)
1939                 adapter->feat_en |= IXGBE_FEATURE_VF;
1940         /* Netmap */
1941         if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1942                 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1943         /* Receive-Side Scaling (RSS) */
1944         if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1945                 adapter->feat_en |= IXGBE_FEATURE_RSS;
1946         /* Needs advanced context descriptor regardless of offloads req'd */
1947         if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1948                 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1949 } /* ixv_init_device_features */
1950