/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "iavf.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IAVF_DRIVER_VERSION_MAJOR	2
#define IAVF_DRIVER_VERSION_MINOR	0
#define IAVF_DRIVER_VERSION_BUILD	0

#define IAVF_DRIVER_VERSION_STRING			\
    __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IAVF_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k"
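
/*
 * With the values above, IAVF_DRIVER_VERSION_STRING expands to "2.0.0-k":
 * __XSTRING() stringifies each component, and the compiler concatenates
 * the resulting adjacent string literals.
 */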

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t iavf_vendor_info_array[] =
{
	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"),
	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
	PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void *iavf_register(device_t dev);
static int iavf_if_attach_pre(if_ctx_t ctx);
static int iavf_if_attach_post(if_ctx_t ctx);
static int iavf_if_detach(if_ctx_t ctx);
static int iavf_if_shutdown(if_ctx_t ctx);
static int iavf_if_suspend(if_ctx_t ctx);
static int iavf_if_resume(if_ctx_t ctx);
static int iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void iavf_if_enable_intr(if_ctx_t ctx);
static void iavf_if_disable_intr(if_ctx_t ctx);
static int iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void iavf_if_queues_free(if_ctx_t ctx);
static void iavf_if_update_admin_status(if_ctx_t ctx);
static void iavf_if_multi_set(if_ctx_t ctx);
static int iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int iavf_if_media_change(if_ctx_t ctx);
static int iavf_if_promisc_set(if_ctx_t ctx, int flags);
static void iavf_if_timer(if_ctx_t ctx, uint16_t qid);
static void iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void iavf_if_stop(if_ctx_t ctx);

static int iavf_allocate_pci_resources(struct iavf_sc *);
static int iavf_reset_complete(struct i40e_hw *);
static int iavf_setup_vc(struct iavf_sc *);
static int iavf_reset(struct iavf_sc *);
static int iavf_vf_config(struct iavf_sc *);
static void iavf_init_filters(struct iavf_sc *);
static void iavf_free_pci_resources(struct iavf_sc *);
static void iavf_free_filters(struct iavf_sc *);
static void iavf_setup_interface(device_t, struct iavf_sc *);
static void iavf_add_device_sysctls(struct iavf_sc *);
static void iavf_enable_adminq_irq(struct i40e_hw *);
static void iavf_disable_adminq_irq(struct i40e_hw *);
static void iavf_enable_queue_irq(struct i40e_hw *, int);
static void iavf_disable_queue_irq(struct i40e_hw *, int);
static void iavf_config_rss(struct iavf_sc *);
static void iavf_stop(struct iavf_sc *);

static int iavf_add_mac_filter(struct iavf_sc *, u8 *, u16);
static int iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
static int iavf_msix_que(void *);
static int iavf_msix_adminq(void *);
//static void iavf_del_multi(struct iavf_sc *sc);
static void iavf_init_multi(struct iavf_sc *sc);
static void iavf_configure_itr(struct iavf_sc *sc);

static int iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
static int iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
static int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
static int iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);

static void iavf_save_tunables(struct iavf_sc *);
static enum i40e_status_code
    iavf_process_adminq(struct iavf_sc *, u16 *);
static int iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
static int iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t iavf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, iavf_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t iavf_driver = {
	"iavf", iavf_methods, sizeof(struct iavf_sc),
};

devclass_t iavf_devclass;
DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
    pci, iavf, iavf_vendor_info_array,
    nitems(iavf_vendor_info_array) - 1);
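
/*
 * The "- 1" above keeps the PVID_END sentinel that terminates
 * iavf_vendor_info_array out of the exported PNP match table.
 */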
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");

static device_method_t iavf_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
	DEVMETHOD(ifdi_detach, iavf_if_detach),
	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
	DEVMETHOD(ifdi_resume, iavf_if_resume),
	DEVMETHOD(ifdi_init, iavf_if_init),
	DEVMETHOD(ifdi_stop, iavf_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
	DEVMETHOD(ifdi_timer, iavf_if_timer),
	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD_END
};

static driver_t iavf_if_driver = {
	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};

/*
** TUNABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0,
    "iavf driver parameters");

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int iavf_enable_head_writeback = 0;
TUNABLE_INT("hw.iavf.enable_head_writeback",
    &iavf_enable_head_writeback);
SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &iavf_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int iavf_core_debug_mask = 0;
TUNABLE_INT("hw.iavf.core_debug_mask",
    &iavf_core_debug_mask);
SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &iavf_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int iavf_shared_debug_mask = 0;
TUNABLE_INT("hw.iavf.shared_debug_mask",
    &iavf_shared_debug_mask);
SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &iavf_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

int iavf_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr);
SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &iavf_rx_itr, 0, "RX Interrupt Rate");

int iavf_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr);
SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &iavf_tx_itr, 0, "TX Interrupt Rate");
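
/*
 * All of the tunables above are CTLFLAG_RDTUN, so they are read once at
 * boot. An illustrative (not prescriptive) /boot/loader.conf snippet:
 *
 *   hw.iavf.enable_head_writeback=1
 *   hw.iavf.core_debug_mask=0x1
 *   hw.iavf.rx_itr=62
 */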

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx iavf_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,	/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = iavf_vendor_info_array,
	.isc_driver_version = IAVF_DRIVER_VERSION_STRING,
	.isc_driver = &iavf_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t iavf_sctx = &iavf_sctx_init;

static void *
iavf_register(device_t dev)
{
	return (iavf_sctx);
}

static int
iavf_allocate_pci_resources(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = iflib_get_dev(sc->vsi.ctx);
	int rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	sc->osdep.mem_bus_space_tag =
	    rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
	    rman_get_bushandle(sc->pci_mem);
	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
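
	/*
	 * Note: the shared i40e code does not dereference hw_addr itself;
	 * its rd32()/wr32() wrappers go through the osdep bus-space tag and
	 * handle saved above, which is why hw_addr below can simply point
	 * at the handle.
	 */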
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
	sc->hw.back = &sc->osdep;

	return (0);
}

static int
iavf_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct iavf_sc *sc;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	if_softc_ctx_t scctx;
	int error = 0;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);

	vsi = &sc->vsi;
	vsi->back = sc;
	sc->dev = dev;
	hw = &sc->hw;
	vsi->hw = hw;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = scctx = iflib_get_softc_ctx(ctx);

	iavf_save_tunables(sc);

	/* Do PCI setup - map BAR0, etc */
	if (iavf_allocate_pci_resources(sc)) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		error = ENXIO;
		goto err_early;
	}

	iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");

	/*
	 * XXX: This is called by init_shared_code in the PF driver,
	 * but the rest of that function does not support VFs.
	 */
	error = i40e_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	iavf_dbg_init(sc, "VF Device is ready for configuration\n");

	/* Sets up Admin Queue */
	error = iavf_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	iavf_dbg_init(sc, "PF API version verified\n");

	/* Need API version before sending reset message */
	error = iavf_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	iavf_dbg_init(sc, "VF reset complete\n");

	/* Ask for VF config from PF */
	error = iavf_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	device_printf(dev,
	    "VSIs %d, QPs %d, MSI-X %d, RSS sizes: key %d lut %d\n",
	    sc->vf_res->num_vsis,
	    sc->vf_res->num_queue_pairs,
	    sc->vf_res->max_vectors,
	    sc->vf_res->rss_key_size,
	    sc->vf_res->rss_lut_size);
	iavf_dbg_info(sc, "Capabilities=%b\n",
	    sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);

	/* got VF config message back from PF, now we can parse it */
	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
		/* XXX: We only use the first VSI we find */
		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			sc->vsi_res = &sc->vf_res->vsi_res[i];
	}
	if (!sc->vsi_res) {
		device_printf(dev, "%s: no LAN VSI found\n", __func__);
		error = EIO;
		goto err_res_buf;
	}
	vsi->id = sc->vsi_res->vsi_id;

	iavf_dbg_init(sc, "Resource Acquisition complete\n");

	/* If no mac address was assigned just make a random one */
	if (!iavf_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Allocate filter lists */
	iavf_init_filters(sc);

	/* Fill out more iflib parameters */
	scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
	    sc->vsi_res->num_queue_pairs;
	if (vsi->enable_head_writeback) {
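		/*
		 * Head writeback mode: size the ring with one extra u32 at
		 * the end, where the hardware writes the index of the last
		 * completed descriptor instead of setting DD bits.
		 */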
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;

	return (0);

err_res_buf:
	free(sc->vf_res, M_IAVF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	iavf_free_pci_resources(sc);
err_early:
	return (error);
}

static int
iavf_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct iavf_sc *sc;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;

	INIT_DBG_DEV(dev, "begin");

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	vsi = &sc->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &sc->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup the stack interface */
	iavf_setup_interface(dev, sc);

	INIT_DBG_DEV(dev, "Interface setup complete");

	/* Initialize statistics & add sysctls */
	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
	iavf_add_device_sysctls(sc);

	sc->init_state = IAVF_INIT_READY;
	atomic_store_rel_32(&sc->queues_enabled, 0);

	/* We want AQ enabled early for init */
	iavf_enable_adminq_irq(hw);

	INIT_DBG_DEV(dev, "end");

	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail
 */
static int
iavf_if_detach(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum i40e_status_code status;

	INIT_DBG_DEV(dev, "begin");

	/* Remove all the media and link information */
	ifmedia_removeall(vsi->media);

	iavf_disable_adminq_irq(hw);
	status = i40e_shutdown_adminq(&sc->hw);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));
	}

	free(sc->vf_res, M_IAVF);
	iavf_free_pci_resources(sc);
	iavf_free_filters(sc);

	INIT_DBG_DEV(dev, "end");

	return (0);
}

static int
iavf_if_shutdown(if_ctx_t ctx)
{
	return (0);
}

static int
iavf_if_suspend(if_ctx_t ctx)
{
	return (0);
}

static int
iavf_if_resume(if_ctx_t ctx)
{
	return (0);
}

static int
iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
{
	int error = 0;
	if_ctx_t ctx = sc->vsi.ctx;

	error = ixl_vc_send_cmd(sc, op);
	if (error != 0) {
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
		return (error);
	}

	/* Don't wait for a response if the device is being detached. */
	if (!iflib_in_detach(ctx)) {
		iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
		error = sx_sleep(ixl_vc_get_op_chan(sc, op),
		    iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);

		if (error == EWOULDBLOCK)
			device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
	}

	return (error);
}

static int
iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
{
	int error = 0;

	error = ixl_vc_send_cmd(sc, op);
	if (error != 0)
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);

	return (error);
}

static void
iavf_init_queues(struct ixl_vsi *vsi)
{
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct rx_ring *rxr;

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
		ixl_init_tx_ring(vsi, tx_que);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		rxr = &rx_que->rxr;

		rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);

		wr32(vsi->hw, rxr->tail, 0);
	}
}

void
iavf_if_init(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = &sc->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	int error = 0;

	INIT_DBG_IF(ifp, "begin");

	MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));

	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(sc->dev, "%s: VF reset failed\n",
		    __func__);
	}

	if (!i40e_check_asq_alive(hw)) {
		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
		pci_enable_busmaster(sc->dev);
		i40e_shutdown_adminq(hw);
		i40e_init_adminq(hw);
	}

	/* Make sure queues are disabled */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);

	bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (!error)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}

	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
	if (!error || error == EEXIST)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the queues for operation */
	iavf_init_queues(vsi);

	/* Set initial ITR values */
	iavf_configure_itr(sc);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

	/* Init SW TX ring indices */
	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	/* Configure promiscuous mode */
	iavf_if_promisc_set(ctx, if_getflags(ifp));

	/* Enable queues */
	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

	sc->init_state = IAVF_RUNNING;
}

/*
 * iavf_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message several times
 * or until the PF responds.
 */
static int
iavf_setup_vc(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = 0;

	/* Need to set these AQ parameters before initializing AQ */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
		/* Initialize admin queue */
		error = i40e_init_adminq(hw);
		if (error) {
			device_printf(dev, "%s: init_adminq failed: %d\n",
			    __func__, error);
			ret_error = 1;
			continue;
		}

		iavf_dbg_init(sc, "Initialized Admin Queue; starting"
		    " send_api_ver attempt %d", i+1);

retry_send:
		/* Send VF's API version */
		error = iavf_send_api_ver(sc);
		if (error) {
			i40e_shutdown_adminq(hw);
			ret_error = 2;
			device_printf(dev, "%s: unable to send api"
			    " version to PF on attempt %d, error %d\n",
			    __func__, i+1, error);
			continue;
		}

		while (!i40e_asq_done(hw)) {
			if (++asq_retries > IAVF_AQ_MAX_ERR) {
				i40e_shutdown_adminq(hw);
				device_printf(dev, "Admin Queue timeout "
				    "(waiting for send_api_ver), %d more tries...\n",
				    IAVF_AQ_MAX_ERR - (i + 1));
				ret_error = 3;
				break;
			}
			i40e_msec_pause(10);
		}
		if (asq_retries > IAVF_AQ_MAX_ERR)
			continue;

		iavf_dbg_init(sc, "Sent API version message to PF");

		/* Verify that the VF accepts the PF's API version */
		error = iavf_verify_api_ver(sc);
		if (error == ETIMEDOUT) {
			if (!send_api_ver_retried) {
				/* Resend message, one more time */
				send_api_ver_retried = true;
				device_printf(dev,
				    "%s: Timeout while verifying API version on first"
				    " try!\n", __func__);
				goto retry_send;
			} else {
				device_printf(dev,
				    "%s: Timeout while verifying API version on second"
				    " try!\n", __func__);
				ret_error = 4;
				break;
			}
		}
		if (error) {
			device_printf(dev,
			    "%s: Unable to verify API version,"
			    " error %s\n", __func__, i40e_stat_str(hw, error));
			ret_error = 5;
		}
		break;
	}

	if (ret_error >= 4)
		i40e_shutdown_adminq(hw);
	return (ret_error);
}

/*
 * iavf_attach() helper function; asks the PF for this VF's
 * configuration, and saves the information if it receives it.
 */
static int
iavf_vf_config(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int bufsz, error = 0, ret_error = 0;
	int asq_retries, retried = 0;

retry_config:
	error = iavf_send_vf_config_msg(sc);
	if (error) {
		device_printf(dev,
		    "%s: Unable to send VF config request, attempt %d,"
		    " error %d\n", __func__, retried + 1, error);
		ret_error = 2;
	}

	asq_retries = 0;
	while (!i40e_asq_done(hw)) {
		if (++asq_retries > IAVF_AQ_MAX_ERR) {
			device_printf(dev, "%s: Admin Queue timeout "
			    "(waiting for send_vf_config_msg), attempt %d\n",
			    __func__, retried + 1);
			ret_error = 3;
			goto fail;
		}
		i40e_msec_pause(10);
	}

	iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
	    retried + 1);

	if (!sc->vf_res) {
		bufsz = sizeof(struct virtchnl_vf_resource) +
		    (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
		sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT);
		if (!sc->vf_res) {
			device_printf(dev,
			    "%s: Unable to allocate memory for VF configuration"
			    " message from PF on attempt %d\n", __func__, retried + 1);
			ret_error = 1;
			goto fail;
		}
	}

	/* Check for VF config response */
	error = iavf_get_vf_config(sc);
	if (error == ETIMEDOUT) {
		/* The 1st time we timeout, send the configuration message again */
		if (!retried) {
			retried++;
			goto retry_config;
		}
		device_printf(dev,
		    "%s: iavf_get_vf_config() timed out waiting for a response\n",
		    __func__);
	}
	if (error) {
		device_printf(dev,
		    "%s: Unable to get VF configuration from PF after %d tries!\n",
		    __func__, retried + 1);
		ret_error = 4;
	}
	goto done;

fail:
	free(sc->vf_res, M_IAVF);
	sc->vf_res = NULL;

done:
	return (ret_error);
}

static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue is vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    iavf_msix_adminq, sc, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler");
		return (err);
	}

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
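		/*
		 * Example of the resulting round-robin mapping: with 4 RX
		 * queues (MSI-X vectors 1-4), txq0 shares vector 1 with
		 * rxq0, and txq4 wraps around to share vector 1 as well.
		 */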
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/* Enable all interrupts */
static void
iavf_if_enable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	iavf_enable_intr(vsi);
}

/* Disable all interrupts */
static void
iavf_if_disable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	iavf_disable_intr(vsi);
}

static int
iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	iavf_enable_queue_irq(hw, rx_que->msix - 1);
	return (0);
}

static int
iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	iavf_enable_queue_irq(hw, tx_que->msix - 1);
	return (0);
}

static int
iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
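			/*
			 * Descriptor writeback mode: tx_rsq records the ring
			 * positions of descriptors that carry the Report
			 * Status bit, so completed work can be credited back
			 * to iflib by checking only those entries.
			 */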
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL1(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
	}

	return (0);
fail:
	iavf_if_queues_free(ctx);
	return (error);
}

static int
iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL1(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
	}

	return (0);
fail:
	iavf_if_queues_free(ctx);
	return (error);
}

static void
iavf_if_queues_free(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IAVF);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IAVF);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IAVF);
		vsi->rx_queues = NULL;
	}
}

static int
iavf_check_aq_errors(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u32 reg, oldreg;
	u8 aq_error = false;

	/* check for Admin queue errors */
	oldreg = reg = rd32(hw, hw->aq.arq.len);
	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
		aq_error = true;
	}
	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	if (aq_error) {
		device_printf(dev, "WARNING: Stopping VF!\n");
		/*
		 * A VF reset might not be enough to fix a problem here;
		 * a PF reset could be required.
		 */
		sc->init_state = IAVF_RESET_REQUIRED;
		iavf_stop(sc);
		iavf_request_reset(sc);
	}

	return (aq_error ? EIO : 0);
}

static enum i40e_status_code
iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &sc->hw;
	struct virtchnl_msg *v_msg;
	int error = 0, loop = 0;
	u32 reg;

	error = iavf_check_aq_errors(sc);
	if (error)
		return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR);

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	v_msg = (struct virtchnl_msg *)&event.desc;

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		/*
		 * Also covers normal case when i40e_clean_arq_element()
		 * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK"
		 */
		if (status)
			break;
		iavf_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		bzero(event.msg_buf, IXL_AQ_BUF_SZ);
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_VFINT_ICR0_ENA1);
	reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK;
	wr32(hw, I40E_VFINT_ICR0_ENA1, reg);

	return (status);
}

static void
iavf_if_update_admin_status(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct i40e_hw *hw = &sc->hw;
	u16 pending;

	iavf_process_adminq(sc, &pending);
	iavf_update_link_status(sc);

	/*
	 * If there are still messages to process, reschedule.
	 * Otherwise, re-enable the Admin Queue interrupt.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		iavf_enable_adminq_irq(hw);
}

static u_int
iavf_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
{
	struct iavf_sc *sc = arg;
	int error;

	error = iavf_add_mac_filter(sc, (u8*)LLADDR(sdl), IXL_FILTER_MC);

	return (!error);
}

static void
iavf_if_multi_set(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);

	IOCTL_DEBUGOUT("iavf_if_multi_set: begin");

	if (__predict_false(if_llmaddr_count(iflib_get_ifp(ctx)) >=
	    MAX_MULTICAST_ADDR)) {
		/* Delete MC filters and enable multicast promisc instead */
		iavf_init_multi(sc);
		sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
		return;
	}

	/* If there aren't too many filters, delete existing MC filters */
	iavf_init_multi(sc);

	/* And (re-)install filters for all mcast addresses */
	if (if_foreach_llmaddr(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc) >
	    0)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
}

static int
iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
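	/*
	 * Reject frames larger than the device maximum: assuming the usual
	 * IXL_MAX_FRAME of 9728 bytes, 18 bytes of Ethernet header plus CRC
	 * and 4 bytes of VLAN tag cap the MTU at 9706.
	 */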
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}

static void
iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct iavf_sc *sc = iflib_get_softc(ctx);

	INIT_DBG_IF(ifp, "begin");

	iavf_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Based on the link speed reported by the PF over the AdminQ, choose a
	 * PHY type to report. This isn't 100% correct since we don't really
	 * know the underlying PHY type of the PF, but at least we can report
	 * a valid link speed...
	 */
	switch (sc->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
	case VIRTCHNL_LINK_SPEED_25GB:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}

	INIT_DBG_IF(ifp, "end");
}

static int
iavf_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("iavf_if_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
iavf_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);

	sc->promisc_flags = 0;

	if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
	if (flags & IFF_PROMISC)
		sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);

	return (0);
}

static void
iavf_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct i40e_hw *hw = &sc->hw;
	u32 val;

	if (qid != 0)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
	if (val != VIRTCHNL_VFR_VFACTIVE
	    && val != VIRTCHNL_VFR_COMPLETED) {
		iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
		return;
	}

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);

	/* Update stats */
	iavf_request_stats(sc);
}

static void
iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct iavf_vlan_filter *v;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	++vsi->num_vlans;
	v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO);
	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}

static void
iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	struct iavf_vlan_filter *v;
	int i = 0;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	SLIST_FOREACH(v, sc->vlan_filters, next) {
		if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
			--vsi->num_vlans;
		}
	}
	if (i)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
}

static uint64_t
iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &sc->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
iavf_free_pci_resources(struct iavf_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	device_t dev = sc->dev;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/* Release all interrupts */
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);

early:
	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
}

/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
*/
static int
iavf_reset(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	int error = 0;

	/* Ask the PF to reset us if we are initiating */
	if (sc->init_state != IAVF_RESET_PENDING)
		iavf_request_reset(sc);

	i40e_msec_pause(100);
	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: VF reset failed\n",
		    __func__);
		return (error);
	}
	pci_enable_busmaster(dev);

	error = i40e_shutdown_adminq(hw);
	if (error) {
		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	error = i40e_init_adminq(hw);
	if (error) {
		device_printf(dev, "%s: init_adminq failed: %d\n",
		    __func__, error);
		return (error);
	}

	iavf_enable_adminq_irq(hw);
	return (0);
}

static int
iavf_reset_complete(struct i40e_hw *hw)
{
	u32 reg;

	/* Wait up to ~10 seconds */
	for (int i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

		if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg == VIRTCHNL_VFR_COMPLETED))
			return (0);
		i40e_msec_pause(100);
	}

	return (EBUSY);
}

static void
iavf_setup_interface(device_t dev, struct iavf_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	if_ctx_t ctx = vsi->ctx;
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi->shared->isc_max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;
#if __FreeBSD_version >= 1100000
	if_setbaudrate(ifp, IF_Gbps(40));
#else
	if_initbaudrate(ifp, IF_Gbps(40));
#endif

	ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
}

/*
** Get a new filter and add it to the mac filter list.
*/
static struct iavf_mac_filter *
iavf_get_mac_filter(struct iavf_sc *sc)
{
	struct iavf_mac_filter *f;

	f = malloc(sizeof(struct iavf_mac_filter),
	    M_IAVF, M_NOWAIT | M_ZERO);
	if (f)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
}

/*
** Find the filter with matching MAC address
*/
static struct iavf_mac_filter *
iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
	struct iavf_mac_filter *f;
	bool match = FALSE;

	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (cmp_etheraddr(f->macaddr, macaddr)) {
			match = TRUE;
			break;
		}
	}

	if (!match)
		f = NULL;
	return (f);
}

/*
** Admin Queue interrupt handler
*/
static int
iavf_msix_adminq(void *arg)
{
	struct iavf_sc *sc = arg;
	struct i40e_hw *hw = &sc->hw;
	u32 reg, mask;
	bool do_task = FALSE;

	++sc->admin_irq;

	reg = rd32(hw, I40E_VFINT_ICR01);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);

	/* Check on the cause */
	if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) {
		mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK;
		do_task = TRUE;
	}

	wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
	iavf_enable_adminq_irq(hw);

	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
		return (FILTER_HANDLED);
}

void
iavf_enable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	iavf_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_enable_queue_irq(hw, que->rxr.me);
}

void
iavf_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_disable_queue_irq(hw, que->rxr.me);
}

static void
iavf_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_enable_adminq_irq(struct i40e_hw *hw)
{
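	/*
	 * INTENA turns the vector on; the all-ones ITR_INDX field selects
	 * ITR index 3, the hardware's "no ITR" encoding, so this write
	 * does not bind the admin vector to a throttling interval.
	 */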
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32 reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

static void
iavf_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id),
	    I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	rd32(hw, I40E_VFGEN_RSTAT);
}

static void
iavf_configure_tx_itr(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_tx_queue *que = vsi->tx_queues;

	vsi->tx_itr_setting = sc->tx_itr;

	for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;

		wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}

static void
iavf_configure_rx_itr(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_rx_queue *que = vsi->rx_queues;

	vsi->rx_itr_setting = sc->rx_itr;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;
	}
}

/*
 * Get initial ITR values from tunable values.
 */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
	iavf_configure_tx_itr(sc);
	iavf_configure_rx_itr(sc);
}

/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
iavf_set_queue_rx_itr(struct ixl_rx_queue *que)
{
	struct ixl_vsi *vsi = que->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct rx_ring *rxr = &que->rxr;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	/* Update the hardware if needed */
	if (rxr->itr != vsi->rx_itr_setting) {
		rxr->itr = vsi->rx_itr_setting;
		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
		    que->rxr.me), rxr->itr);
	}
}

static int
iavf_msix_que(void *arg)
{
	struct ixl_rx_queue *rx_que = arg;

	++rx_que->irqs;

	iavf_set_queue_rx_itr(rx_que);
	// iavf_set_queue_tx_itr(que);

	return (FILTER_SCHEDULE_THREAD);
}

/*********************************************************************
 *  Multicast Initialization
 *
 *  This routine is called by init to reset a fresh state.
 *
 **********************************************************************/
static void
iavf_init_multi(struct iavf_sc *sc)
{
	struct iavf_mac_filter *f;
	int mcnt = 0;

	/* First clear any multicast filters */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
}

/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
void
iavf_update_link_status(struct iavf_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	u64 baudrate;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_vc_speed_to_value(sc->link_speed);
			iavf_dbg_info(sc, "baudrate: %lu\n", baudrate);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/
static void
iavf_stop(struct iavf_sc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vsi.ifp;

	iavf_disable_intr(&sc->vsi);

	if (atomic_load_acq_32(&sc->queues_enabled))
		iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
}

static void
iavf_if_stop(if_ctx_t ctx)
{
	struct iavf_sc *sc = iflib_get_softc(ctx);

	iavf_stop(sc);
}

static void
iavf_config_rss_reg(struct iavf_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;
	u32 lut = 0;
	u64 set_hena = 0, hena;
	int i, j, que_id;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
	u32 rss_hash_config;
#endif

	/* Don't set up RSS if using a single queue */
	if (vsi->num_rx_queues == 1) {
		wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		ixl_flush(hw);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif

	/* Fill out hash function seed */
	for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);

	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	hena |= set_hena;
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
		if (j == vsi->num_rx_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_rx_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_rx_queues;
#else
		que_id = j;
#endif
		/* lut = 4-byte sliding window of 4 lut entries */
		lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
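		/*
		 * e.g. four consecutive queue ids 0, 1, 2 and 3 pack into
		 * lut == 0x00010203 just before the write below.
		 */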
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
	}
	ixl_flush(hw);
}

static void
iavf_config_rss_pf(struct iavf_sc *sc)
{
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
}

/*
** iavf_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
iavf_config_rss(struct iavf_sc *sc)
{
	if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
		iavf_dbg_info(sc, "Setting up RSS using VF registers...");
		iavf_config_rss_reg(sc);
	} else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		iavf_dbg_info(sc, "Setting up RSS using messages to PF...");
		iavf_config_rss_pf(sc);
	} else
		device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
}

/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
*/
static int
iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
{
	struct iavf_mac_filter *f;

	/* Does one already exist? */
	f = iavf_find_mac_filter(sc, macaddr);
	if (f != NULL) {
		iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(macaddr));
		return (EEXIST);
	}

	/* If not, get a new empty filter */
	f = iavf_get_mac_filter(sc);
	if (f == NULL) {
		device_printf(sc->dev, "%s: no filters available!!\n",
		    __func__);
		return (ENOMEM);
	}

	iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
	    MAC_FORMAT_ARGS(macaddr));

	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;

	return (0);
}

/*
** Marks a MAC filter for deletion.
*/
static int
iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
	struct iavf_mac_filter *f;

	f = iavf_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;

	return (0);
}

/*
 * Re-uses the name from the PF driver.
 */
static void
iavf_add_device_sysctls(struct iavf_sc *sc)
{
	struct ixl_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, iavf_sysctl_tx_itr, "I",
	    "Immediately set TX ITR value for all queues");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, iavf_sysctl_rx_itr, "I",
	    "Immediately set RX ITR value for all queues");

	/* Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output. */
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &sc->hw.debug_mask, 0, "Shared code debug message level");

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &sc->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
	    sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
	    sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");

	/* Add stats sysctls */
	ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
	ixl_add_queues_sysctls(dev, vsi);
}

static void
iavf_init_filters(struct iavf_sc *sc)
{
	sc->mac_filters = malloc(sizeof(struct mac_list),
	    M_IAVF, M_WAITOK | M_ZERO);
	SLIST_INIT(sc->mac_filters);
	sc->vlan_filters = malloc(sizeof(struct vlan_list),
	    M_IAVF, M_WAITOK | M_ZERO);
	SLIST_INIT(sc->vlan_filters);
}

static void
iavf_free_filters(struct iavf_sc *sc)
{
	struct iavf_mac_filter *f;
	struct iavf_vlan_filter *v;

	while (!SLIST_EMPTY(sc->mac_filters)) {
		f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_IAVF);
	}
	free(sc->mac_filters, M_IAVF);
	while (!SLIST_EMPTY(sc->vlan_filters)) {
		v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_IAVF);
	}
	free(sc->vlan_filters, M_IAVF);
}

static char *
iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
{
	int index;

	char *speeds[] = {
		"Unknown",
		"100 Mbps",
		"1 Gbps",
		"10 Gbps",
		"40 Gbps",
		"20 Gbps",
		"25 Gbps",
	};

	switch (link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		index = 1;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		index = 2;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		index = 3;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		index = 4;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		index = 5;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		index = 6;
		break;
	case VIRTCHNL_LINK_SPEED_UNKNOWN:
	default:
		index = 0;
		break;
	}

	return speeds[index];
}

static int
iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	int error = 0;

	error = sysctl_handle_string(oidp,
	    iavf_vc_speed_to_string(sc->link_speed),
	    8, req);
	return (error);
}

/*
 * Sanity check and save off tunable values.
 */
static void
iavf_save_tunables(struct iavf_sc *sc)
{
	device_t dev = sc->dev;

	/* Save tunable information */
	sc->dbg_mask = iavf_core_debug_mask;
	sc->hw.debug_mask = iavf_shared_debug_mask;
	sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);

	if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    iavf_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		sc->tx_itr = IXL_ITR_4K;
	} else
		sc->tx_itr = iavf_tx_itr;

	if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    iavf_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		sc->rx_itr = IXL_ITR_8K;
	} else
		sc->rx_itr = iavf_rx_itr;
}

/*
 * Used to set the Tx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 */
static int
iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int requested_tx_itr;
	int error = 0;

	requested_tx_itr = sc->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	sc->tx_itr = requested_tx_itr;
	iavf_configure_tx_itr(sc);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 */
static int
iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int requested_rx_itr;
	int error = 0;

	requested_rx_itr = sc->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	sc->rx_itr = requested_rx_itr;
	iavf_configure_rx_itr(sc);

	return (error);
}

static int
iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	struct iavf_mac_filter *f;
	struct iavf_vlan_filter *v;
	device_t dev = sc->dev;
	int ftl_len, ftl_counter = 0, error = 0;
	struct sbuf *buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_printf(buf, "\n");

	/* Print MAC filters */
	sbuf_printf(buf, "MAC Filters:\n");
	ftl_len = 0;
	SLIST_FOREACH(f, sc->mac_filters, next)
		ftl_len++;
	if (ftl_len < 1)
		sbuf_printf(buf, "(none)\n");
	else {
		SLIST_FOREACH(f, sc->mac_filters, next) {
			sbuf_printf(buf,
			    MAC_FORMAT ", flags %#06x\n",
			    MAC_FORMAT_ARGS(f->macaddr), f->flags);
		}
	}

	/* Print VLAN filters */
	sbuf_printf(buf, "VLAN Filters:\n");
	ftl_len = 0;
	SLIST_FOREACH(v, sc->vlan_filters, next)
		ftl_len++;
	if (ftl_len < 1)
		sbuf_printf(buf, "(none)");
	else {
		SLIST_FOREACH(v, sc->vlan_filters, next) {
			sbuf_printf(buf,
			    "%d, flags %#06x",
			    v->vlan, v->flags);
			/* don't print '\n' for last entry */
			if (++ftl_counter != ftl_len)
				sbuf_printf(buf, "\n");
		}
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}

/*
 * Print out mapping of TX queue indexes and Rx queue indexes
 * to MSI-X vectors.
 */
static int
iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	struct ixl_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
static int
iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	int do_reset = 0, error = 0;

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		iavf_reset(sc);
		if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}

static int
iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int do_reset = 0, error = 0;

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
			device_printf(dev, "PCIE FLR failed\n");
			error = EIO;
		}
		else if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}