/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IAVF_DRIVER_VERSION_MAJOR    2
#define IAVF_DRIVER_VERSION_MINOR    0
#define IAVF_DRIVER_VERSION_BUILD    0

#define IAVF_DRIVER_VERSION_STRING \
    __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "." \
    __XSTRING(IAVF_DRIVER_VERSION_MINOR) "." \
    __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k"
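
/*
 * Note: with the values above, IAVF_DRIVER_VERSION_STRING expands to
 * "2.0.0-k" -- __XSTRING() stringifies each numeric component before
 * the pieces are concatenated.
 */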
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t iavf_vendor_info_array[] =
{
    PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"),
    PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
    PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"),
    /* required last entry */
    PVID_END
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void *iavf_register(device_t dev);
static int  iavf_if_attach_pre(if_ctx_t ctx);
static int  iavf_if_attach_post(if_ctx_t ctx);
static int  iavf_if_detach(if_ctx_t ctx);
static int  iavf_if_shutdown(if_ctx_t ctx);
static int  iavf_if_suspend(if_ctx_t ctx);
static int  iavf_if_resume(if_ctx_t ctx);
static int  iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void iavf_if_enable_intr(if_ctx_t ctx);
static void iavf_if_disable_intr(if_ctx_t ctx);
static int  iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int  iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int  iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int  iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void iavf_if_queues_free(if_ctx_t ctx);
static void iavf_if_update_admin_status(if_ctx_t ctx);
static void iavf_if_multi_set(if_ctx_t ctx);
static int  iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int  iavf_if_media_change(if_ctx_t ctx);
static int  iavf_if_promisc_set(if_ctx_t ctx, int flags);
static void iavf_if_timer(if_ctx_t ctx, uint16_t qid);
static void iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void iavf_if_stop(if_ctx_t ctx);
static int  iavf_allocate_pci_resources(struct iavf_sc *);
static int  iavf_reset_complete(struct i40e_hw *);
static int  iavf_setup_vc(struct iavf_sc *);
static int  iavf_reset(struct iavf_sc *);
static int  iavf_vf_config(struct iavf_sc *);
static void iavf_init_filters(struct iavf_sc *);
static void iavf_free_pci_resources(struct iavf_sc *);
static void iavf_free_filters(struct iavf_sc *);
static void iavf_setup_interface(device_t, struct iavf_sc *);
static void iavf_add_device_sysctls(struct iavf_sc *);
static void iavf_enable_adminq_irq(struct i40e_hw *);
static void iavf_disable_adminq_irq(struct i40e_hw *);
static void iavf_enable_queue_irq(struct i40e_hw *, int);
static void iavf_disable_queue_irq(struct i40e_hw *, int);
static void iavf_config_rss(struct iavf_sc *);
static void iavf_stop(struct iavf_sc *);

static int  iavf_add_mac_filter(struct iavf_sc *, u8 *, u16);
static int  iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
static int  iavf_msix_que(void *);
static int  iavf_msix_adminq(void *);
//static void iavf_del_multi(struct iavf_sc *sc);
static void iavf_init_multi(struct iavf_sc *sc);
static void iavf_configure_itr(struct iavf_sc *sc);

static int  iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
static int  iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
static int  iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int  iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int  iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int  iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
static int  iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);

static void iavf_save_tunables(struct iavf_sc *);
static enum i40e_status_code
    iavf_process_adminq(struct iavf_sc *, u16 *);
static int  iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
static int  iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t iavf_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, iavf_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD_END
};

static driver_t iavf_driver = {
    "iavf", iavf_methods, sizeof(struct iavf_sc),
};

devclass_t iavf_devclass;
DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
    pci, iavf, iavf_vendor_info_array,
    nitems(iavf_vendor_info_array) - 1);
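
/*
 * Note: the "- 1" above excludes the PVID_END sentinel entry, so the
 * exported PNP table only advertises real device IDs to devmatch(8).
 */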
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
static device_method_t iavf_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
    DEVMETHOD(ifdi_detach, iavf_if_detach),
    DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
    DEVMETHOD(ifdi_suspend, iavf_if_suspend),
    DEVMETHOD(ifdi_resume, iavf_if_resume),
    DEVMETHOD(ifdi_init, iavf_if_init),
    DEVMETHOD(ifdi_stop, iavf_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
    DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
    DEVMETHOD(ifdi_media_status, iavf_if_media_status),
    DEVMETHOD(ifdi_media_change, iavf_if_media_change),
    DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
    DEVMETHOD(ifdi_timer, iavf_if_timer),
    DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
    DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
    DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
    DEVMETHOD_END
};

static driver_t iavf_if_driver = {
    "iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};
/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0,
    "iavf driver parameters");

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int iavf_enable_head_writeback = 0;
TUNABLE_INT("hw.iavf.enable_head_writeback",
    &iavf_enable_head_writeback);
SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &iavf_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int iavf_core_debug_mask = 0;
TUNABLE_INT("hw.iavf.core_debug_mask",
    &iavf_core_debug_mask);
SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &iavf_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int iavf_shared_debug_mask = 0;
TUNABLE_INT("hw.iavf.shared_debug_mask",
    &iavf_shared_debug_mask);
SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &iavf_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

int iavf_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr);
SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &iavf_rx_itr, 0, "RX Interrupt Rate");

int iavf_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr);
SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &iavf_tx_itr, 0, "TX Interrupt Rate");
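
/*
 * Example (hypothetical values): since the knobs above are CTLFLAG_RDTUN
 * sysctls backed by TUNABLE_INT(), they are read once at module load and
 * can be set from /boot/loader.conf, e.g.:
 *
 *   hw.iavf.enable_head_writeback="1"
 *   hw.iavf.core_debug_mask="0xffffffff"
 *
 * At runtime the same names under the hw.iavf sysctl tree report the
 * values in effect.
 */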
extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx iavf_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE,	/* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
    .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
    .isc_rx_maxsize = 16384,
    .isc_rx_nsegments = IXL_MAX_RX_SEGS,
    .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,

    .isc_admin_intrcnt = 1,
    .isc_vendor_info = iavf_vendor_info_array,
    .isc_driver_version = IAVF_DRIVER_VERSION_STRING,
    .isc_driver = &iavf_if_driver,
    .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

    .isc_nrxd_min = {IXL_MIN_RING},
    .isc_ntxd_min = {IXL_MIN_RING},
    .isc_nrxd_max = {IXL_MAX_RING},
    .isc_ntxd_max = {IXL_MAX_RING},
    .isc_nrxd_default = {IXL_DEFAULT_RING},
    .isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t iavf_sctx = &iavf_sctx_init;
static void *
iavf_register(device_t dev)
{
    return (iavf_sctx);
}

static int
iavf_allocate_pci_resources(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = iflib_get_dev(sc->vsi.ctx);
    int rid;

    /* Map BAR0 */
    rid = PCIR_BAR(0);
    sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);

    if (!(sc->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
        return (ENXIO);
    }

    /* Save off the PCI information */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
    hw->subsystem_vendor_id =
        pci_read_config(dev, PCIR_SUBVEND_0, 2);
    hw->subsystem_device_id =
        pci_read_config(dev, PCIR_SUBDEV_0, 2);

    hw->bus.device = pci_get_slot(dev);
    hw->bus.func = pci_get_function(dev);

    /* Save off register access information */
    sc->osdep.mem_bus_space_tag =
        rman_get_bustag(sc->pci_mem);
    sc->osdep.mem_bus_space_handle =
        rman_get_bushandle(sc->pci_mem);
    sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
    sc->osdep.flush_reg = I40E_VFGEN_RSTAT;

    sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
    sc->hw.back = &sc->osdep;

    return (0);
}
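
/*
 * Note on the hw_addr assignment above: it stores the address of the
 * bus-space handle, not a direct register mapping; the shared code's
 * rd32()/wr32() wrappers are expected to recover the tag/handle pair
 * through the osdep back-pointer when touching registers.
 */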
static int
iavf_if_attach_pre(if_ctx_t ctx)
{
    struct iavf_sc *sc;
    struct i40e_hw *hw;
    struct ixl_vsi *vsi;
    if_softc_ctx_t scctx;
    device_t dev;
    int error = 0;

    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);

    vsi = &sc->vsi;
    vsi->ctx = ctx;
    sc->dev = dev;
    hw = &sc->hw;

    vsi->media = iflib_get_media(ctx);
    vsi->shared = scctx = iflib_get_softc_ctx(ctx);

    iavf_save_tunables(sc);

    /* Do PCI setup - map BAR0, etc */
    if (iavf_allocate_pci_resources(sc)) {
        device_printf(dev, "%s: Allocation of PCI resources failed\n",
            __func__);
        error = ENXIO;
        goto err_early;
    }

    iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");

    /*
     * XXX: This is called by init_shared_code in the PF driver,
     * but the rest of that function does not support VFs.
     */
    error = i40e_set_mac_type(hw);
    if (error) {
        device_printf(dev, "%s: set_mac_type failed: %d\n",
            __func__, error);
        goto err_pci_res;
    }

    error = iavf_reset_complete(hw);
    if (error) {
        device_printf(dev, "%s: Device is still being reset\n",
            __func__);
        goto err_pci_res;
    }

    iavf_dbg_init(sc, "VF Device is ready for configuration\n");

    /* Sets up Admin Queue */
    error = iavf_setup_vc(sc);
    if (error) {
        device_printf(dev, "%s: Error setting up PF comms, %d\n",
            __func__, error);
        goto err_pci_res;
    }

    iavf_dbg_init(sc, "PF API version verified\n");

    /* Need API version before sending reset message */
    error = iavf_reset(sc);
    if (error) {
        device_printf(dev, "VF reset failed; reload the driver\n");
        goto err_aq;
    }

    iavf_dbg_init(sc, "VF reset complete\n");

    /* Ask for VF config from PF */
    error = iavf_vf_config(sc);
    if (error) {
        device_printf(dev, "Error getting configuration from PF: %d\n",
            error);
        goto err_aq;
    }

    device_printf(dev,
        "VSIs %d, QPs %d, MSI-X %d, RSS sizes: key %d lut %d\n",
        sc->vf_res->num_vsis,
        sc->vf_res->num_queue_pairs,
        sc->vf_res->max_vectors,
        sc->vf_res->rss_key_size,
        sc->vf_res->rss_lut_size);
    iavf_dbg_info(sc, "Capabilities=%b\n",
        sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
    /* got VF config message back from PF, now we can parse it */
    for (int i = 0; i < sc->vf_res->num_vsis; i++) {
        /* XXX: We only use the first VSI we find */
        if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
            sc->vsi_res = &sc->vf_res->vsi_res[i];
    }
    if (!sc->vsi_res) {
        device_printf(dev, "%s: no LAN VSI found\n", __func__);
        error = EIO;
        goto err_res;
    }
    vsi->id = sc->vsi_res->vsi_id;

    iavf_dbg_init(sc, "Resource Acquisition complete\n");

    /* If no MAC address was assigned just make a random one */
    if (!iavf_check_ether_addr(hw->mac.addr)) {
        u8 addr[ETHER_ADDR_LEN];
        arc4rand(&addr, sizeof(addr), 0);
        addr[0] &= 0xFE;
        addr[0] |= 0x02;
        bcopy(addr, hw->mac.addr, sizeof(addr));
    }
    bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
    iflib_set_mac(ctx, hw->mac.addr);

    /* Allocate filter lists */
    iavf_init_filters(sc);

    /* Fill out more iflib parameters */
    scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
        sc->vsi_res->num_queue_pairs;
    if (vsi->enable_head_writeback) {
        scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
            * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
        scctx->isc_txrx = &ixl_txrx_hwb;
    } else {
        scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
            * sizeof(struct i40e_tx_desc), DBA_ALIGN);
        scctx->isc_txrx = &ixl_txrx_dwb;
    }
    scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
        * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
    scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
    scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
    scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
    scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
    scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE;
    scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
    scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;

    return (0);

err_res:
    free(sc->vf_res, M_IAVF);
err_aq:
    i40e_shutdown_adminq(hw);
err_pci_res:
    iavf_free_pci_resources(sc);
err_early:
    return (error);
}
static int
iavf_if_attach_post(if_ctx_t ctx)
{
    struct iavf_sc *sc;
    struct ixl_vsi *vsi;
    struct i40e_hw *hw;
    device_t dev;
    int error = 0;

    INIT_DBG_DEV(dev, "begin");

    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    vsi = &sc->vsi;
    vsi->ifp = iflib_get_ifp(ctx);
    hw = &sc->hw;

    /* Save off determined number of queues for interface */
    vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
    vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

    /* Setup the stack interface */
    iavf_setup_interface(dev, sc);

    INIT_DBG_DEV(dev, "Interface setup complete");

    /* Initialize statistics & add sysctls */
    bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
    iavf_add_device_sysctls(sc);

    sc->init_state = IAVF_INIT_READY;
    atomic_store_rel_32(&sc->queues_enabled, 0);

    /* We want AQ enabled early for init */
    iavf_enable_adminq_irq(hw);

    INIT_DBG_DEV(dev, "end");

    return (error);
}
/*
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail
 */
static int
iavf_if_detach(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    enum i40e_status_code status;

    INIT_DBG_DEV(dev, "begin");

    /* Remove all the media and link information */
    ifmedia_removeall(vsi->media);

    iavf_disable_adminq_irq(hw);
    status = i40e_shutdown_adminq(&sc->hw);
    if (status != I40E_SUCCESS) {
        device_printf(dev,
            "i40e_shutdown_adminq() failed with status %s\n",
            i40e_stat_str(hw, status));
    }

    free(sc->vf_res, M_IAVF);
    iavf_free_pci_resources(sc);
    iavf_free_filters(sc);

    INIT_DBG_DEV(dev, "end");

    return (0);
}
static int
iavf_if_shutdown(if_ctx_t ctx)
{
    return (0);
}

static int
iavf_if_suspend(if_ctx_t ctx)
{
    return (0);
}

static int
iavf_if_resume(if_ctx_t ctx)
{
    return (0);
}
static int
iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
{
    if_ctx_t ctx = sc->vsi.ctx;
    int error = 0;

    error = ixl_vc_send_cmd(sc, op);
    if (error != 0) {
        iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
        return (error);
    }

    /* Don't wait for a response if the device is being detached. */
    if (!iflib_in_detach(ctx)) {
        iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
        error = sx_sleep(ixl_vc_get_op_chan(sc, op),
            iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);

        if (error == EWOULDBLOCK)
            device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
    }

    return (error);
}

static int
iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
{
    int error = 0;

    error = ixl_vc_send_cmd(sc, op);
    if (error != 0)
        iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);

    return (error);
}
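
/*
 * The sleeping variant above blocks on a per-opcode wait channel
 * (ixl_vc_get_op_chan()) until the virtchnl completion path is expected
 * to wake it, or until IAVF_AQ_TIMEOUT expires, in which case sx_sleep()
 * returns EWOULDBLOCK and the timeout is logged.
 */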
static void
iavf_init_queues(struct ixl_vsi *vsi)
{
    struct ixl_tx_queue *tx_que = vsi->tx_queues;
    struct ixl_rx_queue *rx_que = vsi->rx_queues;
    struct rx_ring *rxr;

    for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
        ixl_init_tx_ring(vsi, tx_que);

    for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
        rxr = &rx_que->rxr;
        rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
        wr32(vsi->hw, rxr->tail, 0);
    }
}
void
iavf_if_init(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct i40e_hw *hw = &sc->hw;
    struct ifnet *ifp = iflib_get_ifp(ctx);
    u8 tmpaddr[ETHER_ADDR_LEN];
    int error = 0;

    INIT_DBG_IF(ifp, "begin");

    MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));

    error = iavf_reset_complete(hw);
    if (error) {
        device_printf(sc->dev, "%s: VF reset failed\n",
            __func__);
    }

    if (!i40e_check_asq_alive(hw)) {
        iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
        pci_enable_busmaster(sc->dev);
        i40e_shutdown_adminq(hw);
        i40e_init_adminq(hw);
    }

    /* Make sure queues are disabled */
    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);

    bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
    if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
        (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
        error = iavf_del_mac_filter(sc, hw->mac.addr);
        if (error == 0)
            iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

        bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
    }

    error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
    if (!error || error == EEXIST)
        iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
    iflib_set_mac(ctx, hw->mac.addr);

    /* Prepare the queues for operation */
    iavf_init_queues(vsi);

    /* Set initial ITR values */
    iavf_configure_itr(sc);

    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

    /* Map vectors */
    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

    /* Init SW TX ring indices */
    if (vsi->enable_head_writeback)
        ixl_init_tx_cidx(vsi);
    else
        ixl_init_tx_rsqs(vsi);

    /* Configure promiscuous mode */
    iavf_if_promisc_set(ctx, if_getflags(ifp));

    /* Enable queues */
    iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

    sc->init_state = IAVF_RUNNING;
}
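
/*
 * Note: iavf_if_init() runs with the iflib ctx lock held (see the MPASS
 * at the top), so the sleeping IAVF_FLAG_AQ_ENABLE_QUEUES message relies
 * on sx_sleep() releasing and reacquiring that lock around the wait.
 */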
/*
 * iavf_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
iavf_setup_vc(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    int error = 0, ret_error = 0, asq_retries = 0;
    bool send_api_ver_retried = 0;

    /* Need to set these AQ parameters before initializing AQ */
    hw->aq.num_arq_entries = IXL_AQ_LEN;
    hw->aq.num_asq_entries = IXL_AQ_LEN;
    hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
    hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

    for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
        /* Initialize admin queue */
        error = i40e_init_adminq(hw);
        if (error) {
            device_printf(dev, "%s: init_adminq failed: %d\n",
                __func__, error);
            ret_error = 1;
            continue;
        }

        iavf_dbg_init(sc, "Initialized Admin Queue; starting"
            " send_api_ver attempt %d", i+1);

retry_send:
        /* Send VF's API version */
        error = iavf_send_api_ver(sc);
        if (error) {
            i40e_shutdown_adminq(hw);
            ret_error = 2;
            device_printf(dev, "%s: unable to send api"
                " version to PF on attempt %d, error %d\n",
                __func__, i+1, error);
            continue;
        }

        asq_retries = 0;
        while (!i40e_asq_done(hw)) {
            if (++asq_retries > IAVF_AQ_MAX_ERR) {
                i40e_shutdown_adminq(hw);
                device_printf(dev, "Admin Queue timeout "
                    "(waiting for send_api_ver), %d more tries...\n",
                    IAVF_AQ_MAX_ERR - (i + 1));
                ret_error = 3;
                break;
            }
            i40e_msec_pause(10);
        }
        if (asq_retries > IAVF_AQ_MAX_ERR)
            continue;

        iavf_dbg_init(sc, "Sent API version message to PF");

        /* Verify that the VF accepts the PF's API version */
        error = iavf_verify_api_ver(sc);
        if (error == ETIMEDOUT) {
            if (!send_api_ver_retried) {
                /* Resend message, one more time */
                send_api_ver_retried = true;
                device_printf(dev,
                    "%s: Timeout while verifying API version on first"
                    " try!\n", __func__);
                goto retry_send;
            } else {
                device_printf(dev,
                    "%s: Timeout while verifying API version on second"
                    " try!\n", __func__);
                ret_error = 4;
                break;
            }
        }
        if (error) {
            device_printf(dev,
                "%s: Unable to verify API version,"
                " error %s\n", __func__, i40e_stat_str(hw, error));
            ret_error = 5;
        }
        break;
    }

    if (ret_error >= 4)
        i40e_shutdown_adminq(hw);

    return (ret_error);
}
/*
 * iavf_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
iavf_vf_config(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    int bufsz, error = 0, ret_error = 0;
    int asq_retries, retried = 0;

retry_config:
    error = iavf_send_vf_config_msg(sc);
    if (error) {
        device_printf(dev,
            "%s: Unable to send VF config request, attempt %d,"
            " error %d\n", __func__, retried + 1, error);
        ret_error = 2;
    }

    asq_retries = 0;
    while (!i40e_asq_done(hw)) {
        if (++asq_retries > IAVF_AQ_MAX_ERR) {
            device_printf(dev, "%s: Admin Queue timeout "
                "(waiting for send_vf_config_msg), attempt %d\n",
                __func__, retried + 1);
            ret_error = 3;
            goto fail;
        }
        i40e_msec_pause(10);
    }

    iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
        retried + 1);

    if (!sc->vf_res) {
        bufsz = sizeof(struct virtchnl_vf_resource) +
            (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
        sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT);
        if (!sc->vf_res) {
            device_printf(dev,
                "%s: Unable to allocate memory for VF configuration"
                " message from PF on attempt %d\n", __func__, retried + 1);
            ret_error = 9;
            goto fail;
        }
    }

    /* Check for VF config response */
    error = iavf_get_vf_config(sc);
    if (error == ETIMEDOUT) {
        /* The 1st time we timeout, send the configuration message again */
        if (!retried) {
            retried++;
            goto retry_config;
        }
        device_printf(dev,
            "%s: iavf_get_vf_config() timed out waiting for a response\n",
            __func__);
    }
    if (error) {
        device_printf(dev,
            "%s: Unable to get VF configuration from PF after %d tries!\n",
            __func__, retried + 1);
        ret_error = 4;
    }
    goto done;

fail:
    free(sc->vf_res, M_IAVF);
    sc->vf_res = NULL;
done:
    return (ret_error);
}
static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct ixl_rx_queue *rx_que = vsi->rx_queues;
    struct ixl_tx_queue *tx_que = vsi->tx_queues;
    int err, i, rid, vector = 0;
    char buf[16];

    MPASS(vsi->shared->isc_nrxqsets > 0);
    MPASS(vsi->shared->isc_ntxqsets > 0);

    /* Admin Queue is vector 0 */
    rid = vector + 1;
    err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
        iavf_msix_adminq, sc, 0, "aq");
    if (err) {
        iflib_irq_free(ctx, &vsi->irq);
        device_printf(iflib_get_dev(ctx),
            "Failed to register Admin Queue handler");
        return (err);
    }

    /* Now set up the stations */
    for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
        rid = vector + 1;

        snprintf(buf, sizeof(buf), "rxq%d", i);
        err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
        /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
         * what's expected in the iflib context? */
        if (err) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
            vsi->num_rx_queues = i + 1;
            goto fail;
        }
        rx_que->msix = vector;
    }

    bzero(buf, sizeof(buf));

    for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        iflib_softirq_alloc_generic(ctx,
            &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

        /* TODO: Maybe call a strategy function for this to figure out which
         * interrupts to map Tx queues to. I don't know if there's an immediately
         * better way than this other than a user-supplied map, though. */
        tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
    }

    return (0);

fail:
    iflib_irq_free(ctx, &vsi->irq);
    rx_que = vsi->rx_queues;
    for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);
    return (err);
}
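
/*
 * Example mapping (hypothetical counts): with 2 RX queue sets and 4 TX
 * queue sets, the AdminQ takes vector 0, rxq0/rxq1 take vectors 1/2,
 * and TX queues share the RX vectors round-robin: txq0->1, txq1->2,
 * txq2->1, txq3->2.
 */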
/* Enable all interrupts */
static void
iavf_if_enable_intr(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;

    iavf_enable_intr(vsi);
}

/* Disable all interrupts */
static void
iavf_if_disable_intr(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;

    iavf_disable_intr(vsi);
}
static int
iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct i40e_hw *hw = vsi->hw;
    struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

    iavf_enable_queue_irq(hw, rx_que->msix - 1);
    return (0);
}

static int
iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct i40e_hw *hw = vsi->hw;
    struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

    iavf_enable_queue_irq(hw, tx_que->msix - 1);
    return (0);
}
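
/*
 * Note: the queue interrupt registers are indexed from 0 while MSI-X
 * vector 0 is reserved for the AdminQ, hence "msix - 1" in the two
 * enable routines above.
 */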
static int
iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    if_softc_ctx_t scctx = vsi->shared;
    struct ixl_tx_queue *que;
    int i, j, error = 0;

    MPASS(scctx->isc_ntxqsets > 0);
    MPASS(ntxqs == 1);
    MPASS(scctx->isc_ntxqsets == ntxqsets);

    /* Allocate queue structure memory */
    if (!(vsi->tx_queues =
        (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
        device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        txr->me = i;
        que->vsi = vsi;

        if (!vsi->enable_head_writeback) {
            /* Allocate report status array */
            if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
                device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
                error = ENOMEM;
                goto fail;
            }
            /* Init report status array */
            for (j = 0; j < scctx->isc_ntxd[0]; j++)
                txr->tx_rsq[j] = QIDX_INVALID;
        }
        /* get the virtual and physical address of the hardware queues */
        txr->tail = I40E_QTX_TAIL1(txr->me);
        txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
        txr->tx_paddr = paddrs[i * ntxqs];
    }

    return (0);

fail:
    iavf_if_queues_free(ctx);
    return (error);
}
static int
iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct ixl_rx_queue *que;
    int i, error = 0;

#ifdef INVARIANTS
    if_softc_ctx_t scctx = vsi->shared;
    MPASS(scctx->isc_nrxqsets > 0);
    MPASS(nrxqs == 1);
    MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

    /* Allocate queue structure memory */
    if (!(vsi->rx_queues =
        (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
        nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
        device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto fail;
    }

    for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        rxr->me = i;
        que->vsi = vsi;

        /* get the virtual and physical address of the hardware queues */
        rxr->tail = I40E_QRX_TAIL1(rxr->me);
        rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
        rxr->rx_paddr = paddrs[i * nrxqs];
    }

    return (0);

fail:
    iavf_if_queues_free(ctx);
    return (error);
}
static void
iavf_if_queues_free(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;

    if (!vsi->enable_head_writeback) {
        struct ixl_tx_queue *que;
        int i;

        for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
            struct tx_ring *txr = &que->txr;
            if (txr->tx_rsq != NULL) {
                free(txr->tx_rsq, M_IAVF);
                txr->tx_rsq = NULL;
            }
        }
    }

    if (vsi->tx_queues != NULL) {
        free(vsi->tx_queues, M_IAVF);
        vsi->tx_queues = NULL;
    }
    if (vsi->rx_queues != NULL) {
        free(vsi->rx_queues, M_IAVF);
        vsi->rx_queues = NULL;
    }
}
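
/*
 * Note: tx_rsq (the report-status array) only exists in descriptor
 * write-back mode; in head write-back mode completion is tracked via
 * the head index the hardware writes back, so there is nothing extra
 * to free for those queues.
 */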
static int
iavf_check_aq_errors(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    u32 reg, oldreg;
    u8 aq_error = false;

    /* check for Admin queue errors */
    oldreg = reg = rd32(hw, hw->aq.arq.len);
    if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
        device_printf(dev, "ARQ VF Error detected\n");
        reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
        aq_error = true;
    }
    if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
        device_printf(dev, "ARQ Overflow Error detected\n");
        reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
        aq_error = true;
    }
    if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
        device_printf(dev, "ARQ Critical Error detected\n");
        reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
        aq_error = true;
    }
    if (oldreg != reg)
        wr32(hw, hw->aq.arq.len, reg);

    oldreg = reg = rd32(hw, hw->aq.asq.len);
    if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
        device_printf(dev, "ASQ VF Error detected\n");
        reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
        aq_error = true;
    }
    if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
        device_printf(dev, "ASQ Overflow Error detected\n");
        reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
        aq_error = true;
    }
    if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
        device_printf(dev, "ASQ Critical Error detected\n");
        reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
        aq_error = true;
    }
    if (oldreg != reg)
        wr32(hw, hw->aq.asq.len, reg);

    if (aq_error) {
        device_printf(dev, "WARNING: Stopping VF!\n");
        /*
         * A VF reset might not be enough to fix a problem here;
         * a PF reset could be required.
         */
        sc->init_state = IAVF_RESET_REQUIRED;
        iavf_stop(sc);
        iavf_request_reset(sc);
    }

    return (aq_error ? EIO : 0);
}
static enum i40e_status_code
iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
{
    enum i40e_status_code status = I40E_SUCCESS;
    struct i40e_arq_event_info event;
    struct i40e_hw *hw = &sc->hw;
    struct virtchnl_msg *v_msg;
    int error = 0, loop = 0;
    u32 reg;

    error = iavf_check_aq_errors(sc);
    if (error)
        return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR);

    event.buf_len = IXL_AQ_BUF_SZ;
    event.msg_buf = sc->aq_buffer;
    bzero(event.msg_buf, IXL_AQ_BUF_SZ);
    v_msg = (struct virtchnl_msg *)&event.desc;

    /* clean and process any events */
    do {
        status = i40e_clean_arq_element(hw, &event, pending);
        /*
         * Also covers normal case when i40e_clean_arq_element()
         * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK"
         */
        if (status)
            break;
        iavf_vc_completion(sc, v_msg->v_opcode,
            v_msg->v_retval, event.msg_buf, event.msg_len);
        bzero(event.msg_buf, IXL_AQ_BUF_SZ);
    } while (*pending && (loop++ < IXL_ADM_LIMIT));

    /* Re-enable admin queue interrupt cause */
    reg = rd32(hw, I40E_VFINT_ICR0_ENA1);
    reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK;
    wr32(hw, I40E_VFINT_ICR0_ENA1, reg);

    return (status);
}
static void
iavf_if_update_admin_status(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct i40e_hw *hw = &sc->hw;
    u16 pending;

    iavf_process_adminq(sc, &pending);
    iavf_update_link_status(sc);

    /*
     * If there are still messages to process, reschedule.
     * Otherwise, re-enable the Admin Queue interrupt.
     */
    if (pending > 0)
        iflib_admin_intr_deferred(ctx);
    else
        iavf_enable_adminq_irq(hw);
}
static int
iavf_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
{
    struct iavf_sc *sc = arg;
    int error;

    if (ifma->ifma_addr->sa_family != AF_LINK)
        return (0);
    error = iavf_add_mac_filter(sc,
        (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
        IXL_FILTER_MC);

    return (!error);
}

static void
iavf_if_multi_set(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    int mcnt = 0;

    IOCTL_DEBUGOUT("iavf_if_multi_set: begin");

    mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
    if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
        /* Delete MC filters and enable multicast promisc instead */
        iavf_init_multi(sc);
        sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
        iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
        return;
    }

    /* If there aren't too many filters, delete existing MC filters */
    iavf_init_multi(sc);

    /* And (re-)install filters for all mcast addresses */
    mcnt = if_multi_apply(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc);

    if (mcnt > 0)
        iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
}
static int
iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;

    IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
        ETHER_VLAN_ENCAP_LEN)
        return (EINVAL);

    vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
        ETHER_VLAN_ENCAP_LEN;

    return (0);
}
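
/*
 * Worked example (assuming IXL_MAX_FRAME is 9728): the largest MTU the
 * check above accepts is 9728 - 14 (Ethernet header) - 4 (CRC) - 4
 * (VLAN tag) = 9706 bytes.
 */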
static void
iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct iavf_sc *sc = iflib_get_softc(ctx);

    INIT_DBG_IF(ifp, "begin");

    iavf_update_link_status(sc);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_up)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;
    /* Hardware is always full-duplex */
    ifmr->ifm_active |= IFM_FDX;

    /*
     * Based on the link speed reported by the PF over the AdminQ, choose a
     * PHY type to report. This isn't 100% correct since we don't really
     * know the underlying PHY type of the PF, but at least we can report
     * a valid link speed...
     */
    switch (sc->link_speed) {
    case VIRTCHNL_LINK_SPEED_100MB:
        ifmr->ifm_active |= IFM_100_TX;
        break;
    case VIRTCHNL_LINK_SPEED_1GB:
        ifmr->ifm_active |= IFM_1000_T;
        break;
    case VIRTCHNL_LINK_SPEED_10GB:
        ifmr->ifm_active |= IFM_10G_SR;
        break;
    case VIRTCHNL_LINK_SPEED_20GB:
    case VIRTCHNL_LINK_SPEED_25GB:
        ifmr->ifm_active |= IFM_25G_SR;
        break;
    case VIRTCHNL_LINK_SPEED_40GB:
        ifmr->ifm_active |= IFM_40G_SR4;
        break;
    default:
        ifmr->ifm_active |= IFM_UNKNOWN;
        break;
    }

    INIT_DBG_IF(ifp, "end");
}
static int
iavf_if_media_change(if_ctx_t ctx)
{
    struct ifmedia *ifm = iflib_get_media(ctx);

    INIT_DEBUGOUT("ixl_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
    return (ENODEV);
}
static int
iavf_if_promisc_set(if_ctx_t ctx, int flags)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ifnet *ifp = iflib_get_ifp(ctx);

    sc->promisc_flags = 0;

    if (flags & IFF_ALLMULTI ||
        if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
        sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
    if (flags & IFF_PROMISC)
        sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;

    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);

    return (0);
}
static void
iavf_if_timer(if_ctx_t ctx, uint16_t qid)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct i40e_hw *hw = &sc->hw;
    u32 val;

    if (qid != 0)
        return;

    /* Check for when PF triggers a VF reset */
    val = rd32(hw, I40E_VFGEN_RSTAT) &
        I40E_VFGEN_RSTAT_VFR_STATE_MASK;
    if (val != VIRTCHNL_VFR_VFACTIVE
        && val != VIRTCHNL_VFR_COMPLETED) {
        iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
        return;
    }

    /* Fire off the adminq task */
    iflib_admin_intr_deferred(ctx);

    /* Update stats */
    iavf_request_stats(sc);
}
static void
iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct iavf_vlan_filter *v;

    if ((vtag == 0) || (vtag > 4095))	/* Invalid */
        return;

    ++vsi->num_vlans;
    v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO);
    SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
    v->vlan = vtag;
    v->flags = IXL_FILTER_ADD;

    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}

static void
iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    struct iavf_vlan_filter *v;
    int i = 0;

    if ((vtag == 0) || (vtag > 4095))	/* Invalid */
        return;

    SLIST_FOREACH(v, sc->vlan_filters, next) {
        if (v->vlan == vtag) {
            v->flags = IXL_FILTER_DEL;
            ++i;
            --vsi->num_vlans;
        }
    }
    if (i)
        iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
}
static uint64_t
iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);
    struct ixl_vsi *vsi = &sc->vsi;
    if_t ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (vsi->ipackets);
    case IFCOUNTER_IERRORS:
        return (vsi->ierrors);
    case IFCOUNTER_OPACKETS:
        return (vsi->opackets);
    case IFCOUNTER_OERRORS:
        return (vsi->oerrors);
    case IFCOUNTER_COLLISIONS:
        /* Collisions are by standard impossible in 40G/10G Ethernet */
        return (0);
    case IFCOUNTER_IBYTES:
        return (vsi->ibytes);
    case IFCOUNTER_OBYTES:
        return (vsi->obytes);
    case IFCOUNTER_IMCASTS:
        return (vsi->imcasts);
    case IFCOUNTER_OMCASTS:
        return (vsi->omcasts);
    case IFCOUNTER_IQDROPS:
        return (vsi->iqdrops);
    case IFCOUNTER_OQDROPS:
        return (vsi->oqdrops);
    case IFCOUNTER_NOPROTO:
        return (vsi->noproto);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
}
static void
iavf_free_pci_resources(struct iavf_sc *sc)
{
    struct ixl_vsi *vsi = &sc->vsi;
    struct ixl_rx_queue *rx_que = vsi->rx_queues;
    device_t dev = sc->dev;

    /* We may get here before stations are set up */
    if (rx_que == NULL)
        goto early;

    /* Release all interrupts */
    iflib_irq_free(vsi->ctx, &vsi->irq);

    for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
        iflib_irq_free(vsi->ctx, &rx_que->que_irq);

early:
    if (sc->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->pci_mem), sc->pci_mem);
}
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
iavf_reset(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    device_t dev = sc->dev;
    int error = 0;

    /* Ask the PF to reset us if we are initiating */
    if (sc->init_state != IAVF_RESET_PENDING)
        iavf_request_reset(sc);

    i40e_msec_pause(100);
    error = iavf_reset_complete(hw);
    if (error) {
        device_printf(dev, "%s: VF reset failed\n",
            __func__);
        return (error);
    }
    pci_enable_busmaster(dev);

    error = i40e_shutdown_adminq(hw);
    if (error) {
        device_printf(dev, "%s: shutdown_adminq failed: %d\n",
            __func__, error);
        return (error);
    }

    error = i40e_init_adminq(hw);
    if (error) {
        device_printf(dev, "%s: init_adminq failed: %d\n",
            __func__, error);
        return (error);
    }

    iavf_enable_adminq_irq(hw);
    return (0);
}
static int
iavf_reset_complete(struct i40e_hw *hw)
{
    u32 reg;

    /* Wait up to ~10 seconds */
    for (int i = 0; i < 100; i++) {
        reg = rd32(hw, I40E_VFGEN_RSTAT) &
            I40E_VFGEN_RSTAT_VFR_STATE_MASK;

        if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
            (reg == VIRTCHNL_VFR_COMPLETED))
            return (0);
        i40e_msec_pause(100);
    }

    return (EBUSY);
}
static void
iavf_setup_interface(device_t dev, struct iavf_sc *sc)
{
    struct ixl_vsi *vsi = &sc->vsi;
    if_ctx_t ctx = vsi->ctx;
    struct ifnet *ifp = iflib_get_ifp(ctx);

    INIT_DBG_DEV(dev, "begin");

    vsi->shared->isc_max_frame_size =
        ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
        + ETHER_VLAN_ENCAP_LEN;
#if __FreeBSD_version >= 1100000
    if_setbaudrate(ifp, IF_Gbps(40));
#else
    if_initbaudrate(ifp, IF_Gbps(40));
#endif

    ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
}
/*
** Get a new filter and add it to the mac filter list.
*/
static struct iavf_mac_filter *
iavf_get_mac_filter(struct iavf_sc *sc)
{
    struct iavf_mac_filter *f;

    f = malloc(sizeof(struct iavf_mac_filter),
        M_IAVF, M_NOWAIT | M_ZERO);
    if (f)
        SLIST_INSERT_HEAD(sc->mac_filters, f, next);

    return (f);
}

/*
** Find the filter with matching MAC address
*/
static struct iavf_mac_filter *
iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
    struct iavf_mac_filter *f;
    bool match = FALSE;

    SLIST_FOREACH(f, sc->mac_filters, next) {
        if (cmp_etheraddr(f->macaddr, macaddr)) {
            match = TRUE;
            break;
        }
    }

    if (!match)
        f = NULL;
    return (f);
}
/*
** Admin Queue interrupt handler
*/
static int
iavf_msix_adminq(void *arg)
{
    struct iavf_sc *sc = arg;
    struct i40e_hw *hw = &sc->hw;
    u32 reg, mask;
    bool do_task = FALSE;

    ++sc->admin_irq;

    reg = rd32(hw, I40E_VFINT_ICR01);
    /*
     * For masking off interrupt causes that need to be handled before
     * they can be re-enabled
     */
    mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

    /* Check on the cause */
    if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) {
        mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK;
        do_task = TRUE;
    }

    wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
    iavf_enable_adminq_irq(hw);

    if (do_task)
        return (FILTER_SCHEDULE_THREAD);
    else
        return (FILTER_HANDLED);
}
void
iavf_enable_intr(struct ixl_vsi *vsi)
{
    struct i40e_hw *hw = vsi->hw;
    struct ixl_rx_queue *que = vsi->rx_queues;

    iavf_enable_adminq_irq(hw);
    for (int i = 0; i < vsi->num_rx_queues; i++, que++)
        iavf_enable_queue_irq(hw, que->rxr.me);
}

void
iavf_disable_intr(struct ixl_vsi *vsi)
{
    struct i40e_hw *hw = vsi->hw;
    struct ixl_rx_queue *que = vsi->rx_queues;

    for (int i = 0; i < vsi->num_rx_queues; i++, que++)
        iavf_disable_queue_irq(hw, que->rxr.me);
}
static void
iavf_disable_adminq_irq(struct i40e_hw *hw)
{
    wr32(hw, I40E_VFINT_DYN_CTL01, 0);
    wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
    /* flush */
    rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_enable_adminq_irq(struct i40e_hw *hw)
{
    wr32(hw, I40E_VFINT_DYN_CTL01,
        I40E_VFINT_DYN_CTL01_INTENA_MASK |
        I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
    wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
    /* flush */
    rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_enable_queue_irq(struct i40e_hw *hw, int id)
{
    u32 reg;

    reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
        I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
        I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
    wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
iavf_disable_queue_irq(struct i40e_hw *hw, int id)
{
    wr32(hw, I40E_VFINT_DYN_CTLN1(id),
        I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
    rd32(hw, I40E_VFGEN_RSTAT);
}
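
/*
 * Note: in both queue IRQ routines above, ITR_INDX is written as all
 * ones ("no ITR index"), which tells the hardware not to change the
 * current interrupt throttling interval; only INTENA/CLEARPBA differ
 * between enable and disable.
 */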
static void
iavf_configure_tx_itr(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    struct ixl_vsi *vsi = &sc->vsi;
    struct ixl_tx_queue *que = vsi->tx_queues;

    vsi->tx_itr_setting = sc->tx_itr;

    for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
        struct tx_ring *txr = &que->txr;

        wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
            vsi->tx_itr_setting);
        txr->itr = vsi->tx_itr_setting;
        txr->latency = IXL_AVE_LATENCY;
    }
}

static void
iavf_configure_rx_itr(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    struct ixl_vsi *vsi = &sc->vsi;
    struct ixl_rx_queue *que = vsi->rx_queues;

    vsi->rx_itr_setting = sc->rx_itr;

    for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;

        wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
            vsi->rx_itr_setting);
        rxr->itr = vsi->rx_itr_setting;
        rxr->latency = IXL_AVE_LATENCY;
    }
}

/*
 * Get initial ITR values from tunable values.
 */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
    iavf_configure_tx_itr(sc);
    iavf_configure_rx_itr(sc);
}
/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
iavf_set_queue_rx_itr(struct ixl_rx_queue *que)
{
    struct ixl_vsi *vsi = que->vsi;
    struct i40e_hw *hw = vsi->hw;
    struct rx_ring *rxr = &que->rxr;

    /* Idle, do nothing */
    if (rxr->bytes == 0)
        return;

    /* Update the hardware if needed */
    if (rxr->itr != vsi->rx_itr_setting) {
        rxr->itr = vsi->rx_itr_setting;
        wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
            que->rxr.me), rxr->itr);
    }
}

static int
iavf_msix_que(void *arg)
{
    struct ixl_rx_queue *rx_que = arg;

    ++rx_que->irqs;

    iavf_set_queue_rx_itr(rx_que);
    // iavf_set_queue_tx_itr(que);

    return (FILTER_SCHEDULE_THREAD);
}
/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset a fresh state.
 *
 **********************************************************************/
static void
iavf_init_multi(struct iavf_sc *sc)
{
    struct iavf_mac_filter *f;
    int mcnt = 0;

    /* First clear any multicast filters */
    SLIST_FOREACH(f, sc->mac_filters, next) {
        if ((f->flags & IXL_FILTER_USED)
            && (f->flags & IXL_FILTER_MC)) {
            f->flags |= IXL_FILTER_DEL;
            mcnt++;
        }
    }
    if (mcnt > 0)
        iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
iavf_update_link_status(struct iavf_sc *sc)
{
    struct ixl_vsi *vsi = &sc->vsi;
    u64 baudrate;

    if (sc->link_up) {
        if (vsi->link_active == FALSE) {
            vsi->link_active = TRUE;
            baudrate = ixl_max_vc_speed_to_value(sc->link_speed);
            iavf_dbg_info(sc, "baudrate: %lu\n", baudrate);
            iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
        }
    } else { /* Link down */
        if (vsi->link_active == TRUE) {
            vsi->link_active = FALSE;
            iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
        }
    }
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
iavf_stop(struct iavf_sc *sc)
{
    iavf_disable_intr(&sc->vsi);

    if (atomic_load_acq_32(&sc->queues_enabled))
        iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
}

static void
iavf_if_stop(if_ctx_t ctx)
{
    struct iavf_sc *sc = iflib_get_softc(ctx);

    iavf_stop(sc);
}
static void
iavf_config_rss_reg(struct iavf_sc *sc)
{
    struct i40e_hw *hw = &sc->hw;
    struct ixl_vsi *vsi = &sc->vsi;
    u64 set_hena = 0, hena;
    u32 lut = 0;
    u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
    int i, j, que_id;
#ifdef RSS
    u32 rss_hash_config;
#endif

    /* Don't set up RSS if using a single queue */
    if (vsi->num_rx_queues == 1) {
        wr32(hw, I40E_VFQF_HENA(0), 0);
        wr32(hw, I40E_VFQF_HENA(1), 0);
        ixl_flush(hw);
        return;
    }

#ifdef RSS
    /* Fetch the configured RSS key */
    rss_getkey((uint8_t *) &rss_seed);
#else
    ixl_get_default_rss_key(rss_seed);
#endif

    /* Fill out hash function seed */
    for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
        wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

    /* Enable PCTYPES for RSS: */
#ifdef RSS
    rss_hash_config = rss_gethashconfig();
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
    set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
    hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
        ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
    hena |= set_hena;
    wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
    wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

    /* Populate the LUT with max no. of queues in round robin fashion */
    for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
        if (j == vsi->num_rx_queues)
            j = 0;
#ifdef RSS
        /*
         * Fetch the RSS bucket id for the given indirection entry.
         * Cap it at the number of configured buckets (which is
         * num_rx_queues.)
         */
        que_id = rss_get_indirection_to_bucket(i);
        que_id = que_id % vsi->num_rx_queues;
#else
        que_id = j;
#endif
        /* lut = 4-byte sliding window of 4 lut entries */
        lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
        /* Every fourth entry (i % 4 == 3), lut holds 4 entries; write to the register */
        if ((i & 3) == 3) {
            wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
            DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
        }
    }
    ixl_flush(hw);
}
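
/*
 * Worked example of the LUT packing above (4 RX queues, identity
 * buckets): entries 0,1,2,3 accumulate as lut = 0x00 -> 0x0001 ->
 * 0x000102 -> 0x00010203, and on i == 3 that 32-bit word is written
 * to I40E_VFQF_HLUT(0).
 */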
static void
iavf_config_rss_pf(struct iavf_sc *sc)
{
    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);

    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);

    iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
}
/*
** iavf_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
iavf_config_rss(struct iavf_sc *sc)
{
    if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
        iavf_dbg_info(sc, "Setting up RSS using VF registers...");
        iavf_config_rss_reg(sc);
    } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
        iavf_dbg_info(sc, "Setting up RSS using messages to PF...");
        iavf_config_rss_pf(sc);
    } else
        device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
}
/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a
** virtual channel message to the PF.
*/
static int
iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
{
    struct iavf_mac_filter *f;

    /* Does one already exist? */
    f = iavf_find_mac_filter(sc, macaddr);
    if (f != NULL) {
        iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
            MAC_FORMAT_ARGS(macaddr));
        return (EEXIST);
    }

    /* If not, get a new empty filter */
    f = iavf_get_mac_filter(sc);
    if (f == NULL) {
        device_printf(sc->dev, "%s: no filters available!!\n",
            __func__);
        return (ENOMEM);
    }

    iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
        MAC_FORMAT_ARGS(macaddr));

    bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
    f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
    f->flags |= flags;
    return (0);
}
/*
** Marks a MAC filter for deletion.
*/
static int
iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
    struct iavf_mac_filter *f;

    f = iavf_find_mac_filter(sc, macaddr);
    if (f == NULL)
        return (ENOENT);

    f->flags |= IXL_FILTER_DEL;
    return (0);
}
/*
 * Re-uses the name from the PF driver.
 */
static void
iavf_add_device_sysctls(struct iavf_sc *sc)
{
    struct ixl_vsi *vsi = &sc->vsi;
    device_t dev = sc->dev;

    struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
    struct sysctl_oid_list *ctx_list =
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
    struct sysctl_oid *debug_node;
    struct sysctl_oid_list *debug_list;

    SYSCTL_ADD_PROC(ctx, ctx_list,
        OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
        sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");

    SYSCTL_ADD_PROC(ctx, ctx_list,
        OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, iavf_sysctl_tx_itr, "I",
        "Immediately set TX ITR value for all queues");

    SYSCTL_ADD_PROC(ctx, ctx_list,
        OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
        sc, 0, iavf_sysctl_rx_itr, "I",
        "Immediately set RX ITR value for all queues");

    /* Add sysctls meant to print debug information, but don't list them
     * in "sysctl -a" output. */
    debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
        OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
    debug_list = SYSCTL_CHILDREN(debug_node);

    SYSCTL_ADD_UINT(ctx, debug_list,
        OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
        &sc->hw.debug_mask, 0, "Shared code debug message level");

    SYSCTL_ADD_UINT(ctx, debug_list,
        OID_AUTO, "core_debug_mask", CTLFLAG_RW,
        &sc->dbg_mask, 0, "Non-shared code debug message level");

    SYSCTL_ADD_PROC(ctx, debug_list,
        OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
        sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");

    SYSCTL_ADD_PROC(ctx, debug_list,
        OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
        sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

    SYSCTL_ADD_PROC(ctx, debug_list,
        OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
        sc, 0, iavf_sysctl_vf_reset, "I", "Request a VF reset from PF");

    SYSCTL_ADD_PROC(ctx, debug_list,
        OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
        sc, 0, iavf_sysctl_vflr_reset, "I", "Request a VFLR reset from HW");

    /* Add stats sysctls */
    ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
    ixl_add_queues_sysctls(dev, vsi);
}
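
/*
 * Example usage (assuming device unit 0): the knobs registered above
 * appear under the device's sysctl tree, e.g.
 *
 *   sysctl dev.iavf.0.debug.queue_interrupt_table
 *   sysctl dev.iavf.0.debug.do_vf_reset=1
 *
 * The CTLFLAG_SKIP on the "debug" node keeps these out of a plain
 * "sysctl -a" listing.
 */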
static void
iavf_init_filters(struct iavf_sc *sc)
{
    sc->mac_filters = malloc(sizeof(struct mac_list),
        M_IAVF, M_WAITOK | M_ZERO);
    SLIST_INIT(sc->mac_filters);
    sc->vlan_filters = malloc(sizeof(struct vlan_list),
        M_IAVF, M_WAITOK | M_ZERO);
    SLIST_INIT(sc->vlan_filters);
}

static void
iavf_free_filters(struct iavf_sc *sc)
{
    struct iavf_mac_filter *f;
    struct iavf_vlan_filter *v;

    while (!SLIST_EMPTY(sc->mac_filters)) {
        f = SLIST_FIRST(sc->mac_filters);
        SLIST_REMOVE_HEAD(sc->mac_filters, next);
        free(f, M_IAVF);
    }
    free(sc->mac_filters, M_IAVF);
    while (!SLIST_EMPTY(sc->vlan_filters)) {
        v = SLIST_FIRST(sc->vlan_filters);
        SLIST_REMOVE_HEAD(sc->vlan_filters, next);
        free(v, M_IAVF);
    }
    free(sc->vlan_filters, M_IAVF);
}
char *
iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
{
    int index;
    char *speeds[] = {
        "Unknown",
        "100 Mbps",
        "1 Gbps",
        "10 Gbps",
        "40 Gbps",
        "20 Gbps",
        "25 Gbps",
    };

    switch (link_speed) {
    case VIRTCHNL_LINK_SPEED_100MB:
        index = 1;
        break;
    case VIRTCHNL_LINK_SPEED_1GB:
        index = 2;
        break;
    case VIRTCHNL_LINK_SPEED_10GB:
        index = 3;
        break;
    case VIRTCHNL_LINK_SPEED_40GB:
        index = 4;
        break;
    case VIRTCHNL_LINK_SPEED_20GB:
        index = 5;
        break;
    case VIRTCHNL_LINK_SPEED_25GB:
        index = 6;
        break;
    case VIRTCHNL_LINK_SPEED_UNKNOWN:
    default:
        index = 0;
        break;
    }

    return speeds[index];
}
static int
iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
    struct iavf_sc *sc = (struct iavf_sc *)arg1;
    int error = 0;

    error = sysctl_handle_string(oidp,
        iavf_vc_speed_to_string(sc->link_speed),
        8, req);

    return (error);
}
/*
 * Sanity check and save off tunable values.
 */
static void
iavf_save_tunables(struct iavf_sc *sc)
{
    device_t dev = sc->dev;

    /* Save tunable information */
    sc->dbg_mask = iavf_core_debug_mask;
    sc->hw.debug_mask = iavf_shared_debug_mask;
    sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);

    if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) {
        device_printf(dev, "Invalid tx_itr value of %d set!\n",
            iavf_tx_itr);
        device_printf(dev, "tx_itr must be between %d and %d, "
            "inclusive\n",
            0, IXL_MAX_ITR);
        device_printf(dev, "Using default value of %d instead\n",
            IXL_ITR_4K);
        sc->tx_itr = IXL_ITR_4K;
    } else
        sc->tx_itr = iavf_tx_itr;

    if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) {
        device_printf(dev, "Invalid rx_itr value of %d set!\n",
            iavf_rx_itr);
        device_printf(dev, "rx_itr must be between %d and %d, "
            "inclusive\n",
            0, IXL_MAX_ITR);
        device_printf(dev, "Using default value of %d instead\n",
            IXL_ITR_8K);
        sc->rx_itr = IXL_ITR_8K;
    } else
        sc->rx_itr = iavf_rx_itr;
}
/*
 * Used to set the Tx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 */
static int
iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
{
    struct iavf_sc *sc = (struct iavf_sc *)arg1;
    device_t dev = sc->dev;
    int requested_tx_itr;
    int error = 0;

    requested_tx_itr = sc->tx_itr;
    error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);
    if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
        device_printf(dev,
            "Invalid TX itr value; value must be between 0 and %d\n",
            IXL_MAX_ITR);
        return (EINVAL);
    }

    sc->tx_itr = requested_tx_itr;
    iavf_configure_tx_itr(sc);

    return (error);
}

/*
 * Used to set the Rx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 */
static int
iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
{
    struct iavf_sc *sc = (struct iavf_sc *)arg1;
    device_t dev = sc->dev;
    int requested_rx_itr;
    int error = 0;

    requested_rx_itr = sc->rx_itr;
    error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);
    if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
        device_printf(dev,
            "Invalid RX itr value; value must be between 0 and %d\n",
            IXL_MAX_ITR);
        return (EINVAL);
    }

    sc->rx_itr = requested_rx_itr;
    iavf_configure_rx_itr(sc);

    return (error);
}
static int
iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
    struct iavf_sc *sc = (struct iavf_sc *)arg1;
    struct iavf_mac_filter *f;
    struct iavf_vlan_filter *v;
    device_t dev = sc->dev;
    int ftl_len, ftl_counter = 0, error = 0;
    struct sbuf *buf;

    buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for output.\n");
        return (ENOMEM);
    }

    sbuf_printf(buf, "\n");

    /* Print MAC filters */
    sbuf_printf(buf, "MAC Filters:\n");
    ftl_len = 0;
    SLIST_FOREACH(f, sc->mac_filters, next)
        ftl_len++;
    if (ftl_len < 1)
        sbuf_printf(buf, "(none)\n");
    else {
        SLIST_FOREACH(f, sc->mac_filters, next) {
            sbuf_printf(buf,
                MAC_FORMAT ", flags %#06x\n",
                MAC_FORMAT_ARGS(f->macaddr), f->flags);
        }
    }

    /* Print VLAN filters */
    sbuf_printf(buf, "VLAN Filters:\n");
    ftl_len = 0;
    SLIST_FOREACH(v, sc->vlan_filters, next)
        ftl_len++;
    if (ftl_len < 1)
        sbuf_printf(buf, "(none)");
    else {
        SLIST_FOREACH(v, sc->vlan_filters, next) {
            sbuf_printf(buf,
                "%d, flags %#06x",
                v->vlan, v->flags);
            /* don't print '\n' for last entry */
            if (++ftl_counter != ftl_len)
                sbuf_printf(buf, "\n");
        }
    }

    error = sbuf_finish(buf);
    if (error)
        device_printf(dev, "Error finishing sbuf: %d\n", error);
    sbuf_delete(buf);

    return (error);
}
/*
 * Print out mapping of TX queue indexes and Rx queue indexes
 * to MSI-X vectors.
 */
static int
iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
    struct iavf_sc *sc = (struct iavf_sc *)arg1;
    struct ixl_vsi *vsi = &sc->vsi;
    device_t dev = sc->dev;
    struct sbuf *buf;
    int error = 0;

    struct ixl_rx_queue *rx_que = vsi->rx_queues;
    struct ixl_tx_queue *tx_que = vsi->tx_queues;

    buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
    if (!buf) {
        device_printf(dev, "Could not allocate sbuf for output.\n");
        return (ENOMEM);
    }

    sbuf_cat(buf, "\n");
    for (int i = 0; i < vsi->num_rx_queues; i++) {
        rx_que = &vsi->rx_queues[i];
        sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
    }
    for (int i = 0; i < vsi->num_tx_queues; i++) {
        tx_que = &vsi->tx_queues[i];
        sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
    }

    error = sbuf_finish(buf);
    if (error)
        device_printf(dev, "Error finishing sbuf: %d\n", error);
    sbuf_delete(buf);

    return (error);
}
#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
static int
iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
{
    struct iavf_sc *sc = (struct iavf_sc *)arg1;
    int do_reset = 0, error = 0;

    error = sysctl_handle_int(oidp, &do_reset, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);

    if (do_reset == 1) {
        iavf_reset(sc);
        if (CTX_ACTIVE(sc->vsi.ctx))
            iflib_request_reset(sc->vsi.ctx);
    }

    return (error);
}
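
/*
 * Note on the FLR path below: pcie_flr() takes its timeout in
 * milliseconds while pcie_get_max_completion_timeout() reports
 * microseconds, hence the divide by 1000 (with a 10 ms floor).
 */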
static int
iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
{
    struct iavf_sc *sc = (struct iavf_sc *)arg1;
    device_t dev = sc->dev;
    int do_reset = 0, error = 0;

    error = sysctl_handle_int(oidp, &do_reset, 0, req);
    if ((error) || (req->newptr == NULL))
        return (error);

    if (do_reset == 1) {
        if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
            device_printf(dev, "PCIE FLR failed\n");
            error = EIO;
        }
        else if (CTX_ACTIVE(sc->vsi.ctx))
            iflib_request_reset(sc->vsi.ctx);
    }

    return (error);
}