1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 /*********************************************************************
39 *********************************************************************/
/*
 * Driver version. The three components are stringified with __XSTRING
 * and joined into "major.minor.build-k" for IAVF_DRIVER_VERSION_STRING,
 * which is later exported to iflib via isc_driver_version.
 */
40 #define IAVF_DRIVER_VERSION_MAJOR 2
41 #define IAVF_DRIVER_VERSION_MINOR 0
42 #define IAVF_DRIVER_VERSION_BUILD 0
44 #define IAVF_DRIVER_VERSION_STRING \
45 __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "." \
46 __XSTRING(IAVF_DRIVER_VERSION_MINOR) "." \
47 __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k"
34 /*********************************************************************
52 * Used by probe to select devices to load on
54 * ( Vendor ID, Device ID, Branding String )
55 *********************************************************************/
/*
 * PCI ID table consumed by iflib's probe routine (and by
 * MODULE_PNP_INFO below for autoloading).
 */
57 static pci_vendor_info_t iavf_vendor_info_array[] =
59 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"),
60 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
61 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"),
62 /* required last entry */
43 /*********************************************************************
68 *********************************************************************/
/* iflib ifdi_* device-interface entry points implemented below. */
69 static void *iavf_register(device_t dev);
70 static int iavf_if_attach_pre(if_ctx_t ctx);
71 static int iavf_if_attach_post(if_ctx_t ctx);
72 static int iavf_if_detach(if_ctx_t ctx);
73 static int iavf_if_shutdown(if_ctx_t ctx);
74 static int iavf_if_suspend(if_ctx_t ctx);
75 static int iavf_if_resume(if_ctx_t ctx);
76 static int iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
77 static void iavf_if_enable_intr(if_ctx_t ctx);
78 static void iavf_if_disable_intr(if_ctx_t ctx);
79 static int iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
80 static int iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
81 static int iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
82 static int iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
83 static void iavf_if_queues_free(if_ctx_t ctx);
84 static void iavf_if_update_admin_status(if_ctx_t ctx);
85 static void iavf_if_multi_set(if_ctx_t ctx);
86 static int iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
87 static void iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
88 static int iavf_if_media_change(if_ctx_t ctx);
89 static int iavf_if_promisc_set(if_ctx_t ctx, int flags);
90 static void iavf_if_timer(if_ctx_t ctx, uint16_t qid);
91 static void iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
92 static void iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
93 static uint64_t iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
94 static void iavf_if_stop(if_ctx_t ctx);
/* Internal helpers: PCI/AQ setup, virtchnl (PF) communication, filters. */
96 static int iavf_allocate_pci_resources(struct iavf_sc *);
97 static int iavf_reset_complete(struct i40e_hw *);
98 static int iavf_setup_vc(struct iavf_sc *);
99 static int iavf_reset(struct iavf_sc *);
100 static int iavf_vf_config(struct iavf_sc *);
101 static void iavf_init_filters(struct iavf_sc *);
102 static void iavf_free_pci_resources(struct iavf_sc *);
103 static void iavf_free_filters(struct iavf_sc *);
104 static void iavf_setup_interface(device_t, struct iavf_sc *);
105 static void iavf_add_device_sysctls(struct iavf_sc *);
106 static void iavf_enable_adminq_irq(struct i40e_hw *);
107 static void iavf_disable_adminq_irq(struct i40e_hw *);
108 static void iavf_enable_queue_irq(struct i40e_hw *, int);
109 static void iavf_disable_queue_irq(struct i40e_hw *, int);
110 static void iavf_config_rss(struct iavf_sc *);
111 static void iavf_stop(struct iavf_sc *);
113 static int iavf_add_mac_filter(struct iavf_sc *, u8 *, u16);
114 static int iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
/* MSI-X interrupt filter routines. */
115 static int iavf_msix_que(void *);
116 static int iavf_msix_adminq(void *);
117 //static void iavf_del_multi(struct iavf_sc *sc);
118 static void iavf_init_multi(struct iavf_sc *sc);
119 static void iavf_configure_itr(struct iavf_sc *sc);
/* sysctl handlers exposed under hw.iavf / per-device trees. */
121 static int iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
122 static int iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
123 static int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
124 static int iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
125 static int iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
126 static int iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
127 static int iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
129 char *iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed);
130 static void iavf_save_tunables(struct iavf_sc *);
131 static enum i40e_status_code
132 iavf_process_adminq(struct iavf_sc *, u16 *);
133 static int iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
134 static int iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);
107 /*********************************************************************
137 * FreeBSD Device Interface Entry Points
138 *********************************************************************/
/*
 * newbus method table: everything except device_register is delegated
 * to the generic iflib_device_* implementations.
 */
140 static device_method_t iavf_methods[] = {
141 /* Device interface */
142 DEVMETHOD(device_register, iavf_register),
143 DEVMETHOD(device_probe, iflib_device_probe),
144 DEVMETHOD(device_attach, iflib_device_attach),
145 DEVMETHOD(device_detach, iflib_device_detach),
146 DEVMETHOD(device_shutdown, iflib_device_shutdown),
150 static driver_t iavf_driver = {
151 "iavf", iavf_methods, sizeof(struct iavf_sc),
/* Module glue: attach to the pci bus and export PNP data for autoload. */
154 devclass_t iavf_devclass;
155 DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
156 MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
157 pci, iavf, iavf_vendor_info_array,
158 nitems(iavf_vendor_info_array) - 1);
159 MODULE_VERSION(iavf, 1);
161 MODULE_DEPEND(iavf, pci, 1, 1, 1);
162 MODULE_DEPEND(iavf, ether, 1, 1, 1);
163 MODULE_DEPEND(iavf, iflib, 1, 1, 1);
/* malloc(9) type used for all driver allocations (vf_res, queue arrays). */
165 MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
/*
 * iflib ifdi_* method table mapping framework callbacks onto the
 * iavf_if_* implementations declared above.
 */
129 static device_method_t iavf_if_methods[] = {
168 DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
169 DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
170 DEVMETHOD(ifdi_detach, iavf_if_detach),
171 DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
172 DEVMETHOD(ifdi_suspend, iavf_if_suspend),
173 DEVMETHOD(ifdi_resume, iavf_if_resume),
174 DEVMETHOD(ifdi_init, iavf_if_init),
175 DEVMETHOD(ifdi_stop, iavf_if_stop),
176 DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
177 DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
178 DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
179 DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
180 DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
181 DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
182 DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
183 DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
184 DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
185 DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
186 DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
187 DEVMETHOD(ifdi_media_status, iavf_if_media_status),
188 DEVMETHOD(ifdi_media_change, iavf_if_media_change),
189 DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
190 DEVMETHOD(ifdi_timer, iavf_if_timer),
191 DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
192 DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
193 DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
197 static driver_t iavf_if_driver = {
198 "iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
158 ** TUNEABLE PARAMETERS:
/* Root sysctl node for all loader-tunable driver parameters. */
205 static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0,
206 "iavf driver parameters");
161 * Different method for processing TX descriptor
/*
 * Selects TX completion detection: head writeback by HW (nonzero) vs.
 * scanning descriptor done bits (default 0); also selects the txrx ops
 * table in attach_pre.
 */
212 static int iavf_enable_head_writeback = 0;
213 TUNABLE_INT("hw.iavf.enable_head_writeback",
214 &iavf_enable_head_writeback);
215 SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
216 &iavf_enable_head_writeback, 0,
217 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
/* Debug print masks for driver-local vs. Intel shared code. */
219 static int iavf_core_debug_mask = 0;
220 TUNABLE_INT("hw.iavf.core_debug_mask",
221 &iavf_core_debug_mask);
222 SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
223 &iavf_core_debug_mask, 0,
224 "Display debug statements that are printed in non-shared code");
226 static int iavf_shared_debug_mask = 0;
227 TUNABLE_INT("hw.iavf.shared_debug_mask",
228 &iavf_shared_debug_mask);
229 SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
230 &iavf_shared_debug_mask, 0,
231 "Display debug statements that are printed in shared code");
/* Default interrupt throttling rates (ITR) for RX and TX. */
233 int iavf_rx_itr = IXL_ITR_8K;
234 TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr);
235 SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
236 &iavf_rx_itr, 0, "RX Interrupt Rate");
238 int iavf_tx_itr = IXL_ITR_4K;
239 TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr);
240 SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
241 &iavf_tx_itr, 0, "TX Interrupt Rate");
246 static struct if_shared_ctx iavf_sctx_init = {
247 .isc_magic = IFLIB_MAGIC,
248 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
249 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
250 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
251 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
252 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
253 .isc_rx_maxsize = 16384,
254 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
255 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
260 .isc_admin_intrcnt = 1,
261 .isc_vendor_info = iavf_vendor_info_array,
262 .isc_driver_version = IAVF_DRIVER_VERSION_STRING,
263 .isc_driver = &iavf_if_driver,
264 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_IS_VF,
266 .isc_nrxd_min = {IXL_MIN_RING},
267 .isc_ntxd_min = {IXL_MIN_RING},
268 .isc_nrxd_max = {IXL_MAX_RING},
269 .isc_ntxd_max = {IXL_MAX_RING},
270 .isc_nrxd_default = {IXL_DEFAULT_RING},
271 .isc_ntxd_default = {IXL_DEFAULT_RING},
274 if_shared_ctx_t iavf_sctx = &iavf_sctx_init;
/* device_register method; presumably returns iavf_sctx so iflib can
 * drive the attach — body not visible here, confirm against full source. */
212 iavf_register(device_t dev)
/*
 * Map BAR0 and cache PCI identity/bus info into the shared i40e_hw
 * structure; records bus-space tag/handle so the register access
 * macros (and osdep flush register) work.
 */
213 iavf_allocate_pci_resources(struct iavf_sc *sc)
286 struct i40e_hw *hw = &sc->hw;
287 device_t dev = iflib_get_dev(sc->vsi.ctx);
292 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
295 if (!(sc->pci_mem)) {
296 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
300 /* Save off the PCI information */
301 hw->vendor_id = pci_get_vendor(dev);
302 hw->device_id = pci_get_device(dev);
303 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
304 hw->subsystem_vendor_id =
305 pci_read_config(dev, PCIR_SUBVEND_0, 2);
306 hw->subsystem_device_id =
307 pci_read_config(dev, PCIR_SUBDEV_0, 2);
309 hw->bus.device = pci_get_slot(dev);
310 hw->bus.func = pci_get_function(dev);
312 /* Save off register access information */
313 sc->osdep.mem_bus_space_tag =
314 rman_get_bustag(sc->pci_mem);
315 sc->osdep.mem_bus_space_handle =
316 rman_get_bushandle(sc->pci_mem);
317 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
318 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
/* hw_addr points at the bus-space handle, not a direct mapping; the
 * osdep register wrappers dereference it accordingly. */
321 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
322 sc->hw.back = &sc->osdep;
/*
 * First-stage attach (ifdi_attach_pre): maps PCI resources, brings up
 * the admin queue, negotiates the virtchnl API with the PF, requests
 * and parses the VF configuration, picks the first SRIOV VSI, sets the
 * MAC address (random if the PF assigned none), and fills in the iflib
 * softc context (queue sizes, txrx ops, TSO/csum capabilities).
 */
238 iavf_if_attach_pre(if_ctx_t ctx)
334 if_softc_ctx_t scctx;
337 dev = iflib_get_dev(ctx);
338 sc = iflib_get_softc(ctx);
349 vsi->media = iflib_get_media(ctx);
350 vsi->shared = scctx = iflib_get_softc_ctx(ctx);
352 iavf_save_tunables(sc);
354 /* Do PCI setup - map BAR0, etc */
355 if (iavf_allocate_pci_resources(sc)) {
356 device_printf(dev, "%s: Allocation of PCI resources failed\n",
362 iavf_dbg_init(sc, "Allocated PCI resources and MSIX vectors\n");
365 * XXX: This is called by init_shared_code in the PF driver,
366 * but the rest of that function does not support VFs.
368 error = i40e_set_mac_type(hw);
370 device_printf(dev, "%s: set_mac_type failed: %d\n",
375 error = iavf_reset_complete(hw);
377 device_printf(dev, "%s: Device is still being reset\n",
382 iavf_dbg_init(sc, "VF Device is ready for configuration\n");
384 /* Sets up Admin Queue */
385 error = iavf_setup_vc(sc);
387 device_printf(dev, "%s: Error setting up PF comms, %d\n",
392 iavf_dbg_init(sc, "PF API version verified\n");
394 /* Need API version before sending reset message */
395 error = iavf_reset(sc);
397 device_printf(dev, "VF reset failed; reload the driver\n");
401 iavf_dbg_init(sc, "VF reset complete\n");
403 /* Ask for VF config from PF */
404 error = iavf_vf_config(sc);
406 device_printf(dev, "Error getting configuration from PF: %d\n",
411 device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
412 sc->vf_res->num_vsis,
413 sc->vf_res->num_queue_pairs,
414 sc->vf_res->max_vectors,
415 sc->vf_res->rss_key_size,
416 sc->vf_res->rss_lut_size);
417 iavf_dbg_info(sc, "Capabilities=%b\n",
418 sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
420 /* got VF config message back from PF, now we can parse it */
421 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
422 /* XXX: We only use the first VSI we find */
423 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
424 sc->vsi_res = &sc->vf_res->vsi_res[i];
427 device_printf(dev, "%s: no LAN VSI found\n", __func__);
431 vsi->id = sc->vsi_res->vsi_id;
433 iavf_dbg_init(sc, "Resource Acquisition complete\n");
435 /* If no mac address was assigned just make a random one */
436 if (!iavf_check_ether_addr(hw->mac.addr)) {
437 u8 addr[ETHER_ADDR_LEN];
438 arc4rand(&addr, sizeof(addr), 0);
441 bcopy(addr, hw->mac.addr, sizeof(addr));
443 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
444 iflib_set_mac(ctx, hw->mac.addr);
446 /* Allocate filter lists */
447 iavf_init_filters(sc);
449 /* Fill out more iflib parameters */
450 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
451 sc->vsi_res->num_queue_pairs;
452 if (vsi->enable_head_writeback) {
/* Head-writeback mode needs extra space after the ring for the
 * HW-written head value (u32). */
453 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
454 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
455 scctx->isc_txrx = &ixl_txrx_hwb;
457 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
458 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
459 scctx->isc_txrx = &ixl_txrx_dwb;
461 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
462 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
463 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
464 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
465 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
466 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
467 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
468 scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE;
469 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
470 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
/* Error unwind: release VF config, AQ, and PCI resources in order. */
475 free(sc->vf_res, M_IAVF);
477 i40e_shutdown_adminq(hw);
479 iavf_free_pci_resources(sc);
/*
 * Second-stage attach (ifdi_attach_post): record the negotiated queue
 * counts and ifnet, set up the stack-facing interface, zero stats, add
 * sysctls, and enable the admin queue interrupt early so virtchnl
 * responses can be processed during init.
 */
315 iavf_if_attach_post(if_ctx_t ctx)
493 INIT_DBG_DEV(dev, "begin");
495 dev = iflib_get_dev(ctx);
496 sc = iflib_get_softc(ctx);
498 vsi->ifp = iflib_get_ifp(ctx);
501 /* Save off determined number of queues for interface */
502 vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
503 vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
505 /* Setup the stack interface */
506 iavf_setup_interface(dev, sc);
508 INIT_DBG_DEV(dev, "Interface setup complete");
510 /* Initialize statistics & add sysctls */
511 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
512 iavf_add_device_sysctls(sc);
514 sc->init_state = IAVF_INIT_READY;
515 atomic_store_rel_32(&sc->queues_enabled, 0);
517 /* We want AQ enabled early for init */
518 iavf_enable_adminq_irq(hw);
520 INIT_DBG_DEV(dev, "end");
334 * XXX: iflib always ignores the return value of detach()
335 * -> This means that this isn't allowed to fail
/*
 * Teardown (ifdi_detach): remove media entries, quiesce the admin
 * queue (logging but tolerating shutdown failure, per the note above),
 * then free the VF config, PCI resources, and filter lists.
 */
336 iavf_if_detach(if_ctx_t ctx)
532 struct iavf_sc *sc = iflib_get_softc(ctx);
533 struct ixl_vsi *vsi = &sc->vsi;
534 struct i40e_hw *hw = &sc->hw;
535 device_t dev = sc->dev;
536 enum i40e_status_code status;
538 INIT_DBG_DEV(dev, "begin");
540 /* Remove all the media and link information */
541 ifmedia_removeall(vsi->media);
543 iavf_disable_adminq_irq(hw);
544 status = i40e_shutdown_adminq(&sc->hw);
545 if (status != I40E_SUCCESS) {
547 "i40e_shutdown_adminq() failed with status %s\n",
548 i40e_stat_str(hw, status));
551 free(sc->vf_res, M_IAVF);
552 iavf_free_pci_resources(sc);
553 iavf_free_filters(sc);
555 INIT_DBG_DEV(dev, "end");
/* ifdi shutdown/suspend/resume stubs — bodies not visible in this view. */
354 iavf_if_shutdown(if_ctx_t ctx)
566 iavf_if_suspend(if_ctx_t ctx)
572 iavf_if_resume(if_ctx_t ctx)
/*
 * Send a virtchnl command to the PF and sleep (on the iflib ctx lock)
 * until the matching completion wakes us or IAVF_AQ_TIMEOUT expires.
 * Skips the wait entirely while the device is detaching.
 */
357 iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
581 if_ctx_t ctx = sc->vsi.ctx;
583 error = ixl_vc_send_cmd(sc, op);
585 iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
589 /* Don't wait for a response if the device is being detached. */
590 if (!iflib_in_detach(ctx)) {
591 iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
592 error = sx_sleep(ixl_vc_get_op_chan(sc, op),
593 iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);
595 if (error == EWOULDBLOCK)
596 device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
/* Fire-and-forget variant: send the virtchnl command without waiting
 * for the PF's response. */
368 iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
607 error = ixl_vc_send_cmd(sc, op);
609 iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
/*
 * Prepare each TX ring and, for each RX ring, pick the mbuf cluster
 * size from the max frame size and zero the hardware tail register.
 */
371 iavf_init_queues(struct ixl_vsi *vsi)
617 if_softc_ctx_t scctx = vsi->shared;
618 struct ixl_tx_queue *tx_que = vsi->tx_queues;
619 struct ixl_rx_queue *rx_que = vsi->rx_queues;
622 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
623 ixl_init_tx_ring(vsi, tx_que);
625 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
/* Standard cluster if a frame fits, else jumbo page cluster. */
628 if (scctx->isc_max_frame_size <= MCLBYTES)
629 rxr->mbuf_sz = MCLBYTES;
631 rxr->mbuf_sz = MJUMPAGESIZE;
633 wr32(vsi->hw, rxr->tail, 0);
/*
 * ifdi_init: bring the interface up. Verifies reset completion,
 * revives the admin queue if the ASQ died, disables queues, syncs the
 * MAC filter with the ifnet LLADDR, programs queues/ITR/vector maps
 * and promiscuous mode via virtchnl, then enables the queues (waiting
 * for the PF's ack). Runs under the iflib ctx lock (asserted below).
 */
382 iavf_if_init(if_ctx_t ctx)
640 struct iavf_sc *sc = iflib_get_softc(ctx);
641 struct ixl_vsi *vsi = &sc->vsi;
642 struct i40e_hw *hw = &sc->hw;
643 struct ifnet *ifp = iflib_get_ifp(ctx);
644 u8 tmpaddr[ETHER_ADDR_LEN];
647 INIT_DBG_IF(ifp, "begin");
649 MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));
651 error = iavf_reset_complete(hw);
653 device_printf(sc->dev, "%s: VF reset failed\n",
657 if (!i40e_check_asq_alive(hw)) {
658 iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
659 pci_enable_busmaster(sc->dev);
660 i40e_shutdown_adminq(hw);
661 i40e_init_adminq(hw);
664 /* Make sure queues are disabled */
665 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
/* If the stack changed the LLADDR, swap the MAC filter to match. */
667 bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
668 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
669 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
670 error = iavf_del_mac_filter(sc, hw->mac.addr);
672 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
674 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
677 error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
678 if (!error || error == EEXIST)
679 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
680 iflib_set_mac(ctx, hw->mac.addr);
682 /* Prepare the queues for operation */
683 iavf_init_queues(vsi);
685 /* Set initial ITR values */
686 iavf_configure_itr(sc);
688 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);
694 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);
696 /* Init SW TX ring indices */
697 if (vsi->enable_head_writeback)
698 ixl_init_tx_cidx(vsi);
700 ixl_init_tx_rsqs(vsi);
702 /* Configure promiscuous mode */
703 iavf_if_promisc_set(ctx, if_getflags(ifp));
/* Sleep for the PF ack so queues are truly enabled before RUNNING. */
706 iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);
708 sc->init_state = IAVF_RUNNING;
423 * iavf_attach() helper function; initalizes the admin queue
424 * and attempts to establish contact with the PF by
425 * retrying the initial "API version" message several times
426 * or until the PF responds.
427 iavf_setup_vc(struct iavf_sc *sc)
720 struct i40e_hw *hw = &sc->hw;
721 device_t dev = sc->dev;
722 int error = 0, ret_error = 0, asq_retries = 0;
723 bool send_api_ver_retried = 0;
725 /* Need to set these AQ paramters before initializing AQ */
726 hw->aq.num_arq_entries = IXL_AQ_LEN;
727 hw->aq.num_asq_entries = IXL_AQ_LEN;
728 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
729 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
/* Retry loop: init AQ, send API version, poll ASQ, verify response. */
731 for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
732 /* Initialize admin queue */
733 error = i40e_init_adminq(hw);
735 device_printf(dev, "%s: init_adminq failed: %d\n",
741 iavf_dbg_init(sc, "Initialized Admin Queue; starting"
742 " send_api_ver attempt %d", i+1);
745 /* Send VF's API version */
746 error = iavf_send_api_ver(sc);
748 i40e_shutdown_adminq(hw);
750 device_printf(dev, "%s: unable to send api"
751 " version to PF on attempt %d, error %d\n",
752 __func__, i+1, error);
/* Busy-poll the send queue until the message is consumed. */
756 while (!i40e_asq_done(hw)) {
757 if (++asq_retries > IAVF_AQ_MAX_ERR) {
758 i40e_shutdown_adminq(hw);
759 device_printf(dev, "Admin Queue timeout "
760 "(waiting for send_api_ver), %d more tries...\n",
761 IAVF_AQ_MAX_ERR - (i + 1));
767 if (asq_retries > IAVF_AQ_MAX_ERR)
770 iavf_dbg_init(sc, "Sent API version message to PF");
772 /* Verify that the VF accepts the PF's API version */
773 error = iavf_verify_api_ver(sc);
774 if (error == ETIMEDOUT) {
775 if (!send_api_ver_retried) {
776 /* Resend message, one more time */
777 send_api_ver_retried = true;
779 "%s: Timeout while verifying API version on first"
780 " try!\n", __func__);
784 "%s: Timeout while verifying API version on second"
785 " try!\n", __func__);
792 "%s: Unable to verify API version,"
793 " error %s\n", __func__, i40e_stat_str(hw, error));
800 i40e_shutdown_adminq(hw);
470 * iavf_attach() helper function; asks the PF for this VF's
471 * configuration, and saves the information if it receives it.
472 iavf_vf_config(struct iavf_sc *sc)
811 struct i40e_hw *hw = &sc->hw;
812 device_t dev = sc->dev;
813 int bufsz, error = 0, ret_error = 0;
814 int asq_retries, retried = 0;
817 error = iavf_send_vf_config_msg(sc);
820 "%s: Unable to send VF config request, attempt %d,"
821 " error %d\n", __func__, retried + 1, error);
/* Busy-poll the ASQ until the request is consumed by the PF. */
826 while (!i40e_asq_done(hw)) {
827 if (++asq_retries > IAVF_AQ_MAX_ERR) {
828 device_printf(dev, "%s: Admin Queue timeout "
829 "(waiting for send_vf_config_msg), attempt %d\n",
830 __func__, retried + 1);
837 iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
/* Worst-case response: vf_resource header plus the maximum number of
 * per-VSI resource entries. Allocated M_NOWAIT, so NULL is possible. */
841 bufsz = sizeof(struct virtchnl_vf_resource) +
842 (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
843 sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT);
846 "%s: Unable to allocate memory for VF configuration"
847 " message from PF on attempt %d\n", __func__, retried + 1);
853 /* Check for VF config response */
854 error = iavf_get_vf_config(sc);
855 if (error == ETIMEDOUT) {
856 /* The 1st time we timeout, send the configuration message again */
862 "%s: iavf_get_vf_config() timed out waiting for a response\n",
867 "%s: Unable to get VF configuration from PF after %d tries!\n",
868 __func__, retried + 1);
874 free(sc->vf_res, M_IAVF);
/*
 * ifdi_msix_intr_assign: vector 0 handles the admin queue; vectors
 * 1..n are bound to RX queues. TX queues get softirqs piggybacked on
 * the RX vectors (round-robin when there are more TX than RX queues).
 * On failure, frees every IRQ allocated so far.
 */
499 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix)
882 struct iavf_sc *sc = iflib_get_softc(ctx);
883 struct ixl_vsi *vsi = &sc->vsi;
884 struct ixl_rx_queue *rx_que = vsi->rx_queues;
885 struct ixl_tx_queue *tx_que = vsi->tx_queues;
886 int err, i, rid, vector = 0;
889 MPASS(vsi->shared->isc_nrxqsets > 0);
890 MPASS(vsi->shared->isc_ntxqsets > 0);
892 /* Admin Que is vector 0*/
894 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
895 iavf_msix_adminq, sc, 0, "aq");
897 iflib_irq_free(ctx, &vsi->irq);
898 device_printf(iflib_get_dev(ctx),
899 "Failed to register Admin Que handler");
903 /* Now set up the stations */
904 for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
907 snprintf(buf, sizeof(buf), "rxq%d", i);
908 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
909 IFLIB_INTR_RX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
910 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
911 * what's expected in the iflib context? */
913 device_printf(iflib_get_dev(ctx),
914 "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
915 vsi->num_rx_queues = i + 1;
918 rx_que->msix = vector;
921 bzero(buf, sizeof(buf));
923 for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
924 snprintf(buf, sizeof(buf), "txq%d", i);
925 iflib_softirq_alloc_generic(ctx,
926 &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
927 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
929 /* TODO: Maybe call a strategy function for this to figure out which
930 * interrupts to map Tx queues to. I don't know if there's an immediately
931 * better way than this other than a user-supplied map, though. */
932 tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
/* Error unwind: release the admin IRQ and every RX queue IRQ. */
937 iflib_irq_free(ctx, &vsi->irq);
938 rx_que = vsi->rx_queues;
939 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
940 iflib_irq_free(ctx, &rx_que->que_irq);
538 /* Enable all interrupts */
539 iavf_if_enable_intr(if_ctx_t ctx)
948 struct iavf_sc *sc = iflib_get_softc(ctx);
949 struct ixl_vsi *vsi = &sc->vsi;
951 iavf_enable_intr(vsi);
543 /* Disable all interrupts */
544 iavf_if_disable_intr(if_ctx_t ctx)
958 struct iavf_sc *sc = iflib_get_softc(ctx);
959 struct ixl_vsi *vsi = &sc->vsi;
961 iavf_disable_intr(vsi);
/* Re-enable the MSI-X vector for one RX queue. msix - 1 converts the
 * 1-based queue vector (vector 0 is the admin queue) to the HW index. */
548 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
967 struct iavf_sc *sc = iflib_get_softc(ctx);
968 struct ixl_vsi *vsi = &sc->vsi;
969 struct i40e_hw *hw = vsi->hw;
970 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
972 iavf_enable_queue_irq(hw, rx_que->msix - 1);
/* Re-enable the MSI-X vector for one TX queue (shared with an RX
 * vector; see the round-robin mapping in msix_intr_assign). */
554 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
979 struct iavf_sc *sc = iflib_get_softc(ctx);
980 struct ixl_vsi *vsi = &sc->vsi;
981 struct i40e_hw *hw = vsi->hw;
982 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
984 iavf_enable_queue_irq(hw, tx_que->msix - 1);
/*
 * ifdi_tx_queues_alloc: allocate the software TX queue array and wire
 * each ring to the iflib-provided DMA memory (vaddrs/paddrs). In
 * descriptor-writeback mode, also allocate the per-ring report-status
 * array used to track completed descriptors. Frees everything via
 * iavf_if_queues_free on failure.
 */
560 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
991 struct iavf_sc *sc = iflib_get_softc(ctx);
992 struct ixl_vsi *vsi = &sc->vsi;
993 if_softc_ctx_t scctx = vsi->shared;
994 struct ixl_tx_queue *que;
997 MPASS(scctx->isc_ntxqsets > 0);
999 MPASS(scctx->isc_ntxqsets == ntxqsets);
1001 /* Allocate queue structure memory */
1002 if (!(vsi->tx_queues =
1003 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1004 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1008 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1009 struct tx_ring *txr = &que->txr;
1014 if (!vsi->enable_head_writeback) {
1015 /* Allocate report status array */
1016 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
1017 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1021 /* Init report status array */
1022 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1023 txr->tx_rsq[j] = QIDX_INVALID;
1025 /* get the virtual and physical address of the hardware queues */
1026 txr->tail = I40E_QTX_TAIL1(txr->me);
1027 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1028 txr->tx_paddr = paddrs[i * ntxqs];
/* Error path: release any partially-built queue state. */
1034 iavf_if_queues_free(ctx);
/*
 * ifdi_rx_queues_alloc: allocate the software RX queue array and wire
 * each ring to the iflib-provided descriptor memory. Frees via
 * iavf_if_queues_free on failure.
 */
585 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1041 struct iavf_sc *sc = iflib_get_softc(ctx);
1042 struct ixl_vsi *vsi = &sc->vsi;
1043 struct ixl_rx_queue *que;
1047 if_softc_ctx_t scctx = vsi->shared;
1048 MPASS(scctx->isc_nrxqsets > 0);
1050 MPASS(scctx->isc_nrxqsets == nrxqsets);
1053 /* Allocate queue structure memory */
1054 if (!(vsi->rx_queues =
1055 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1056 nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1057 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1062 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1063 struct rx_ring *rxr = &que->rxr;
1068 /* get the virtual and physical address of the hardware queues */
1069 rxr->tail = I40E_QRX_TAIL1(rxr->me);
1070 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1071 rxr->rx_paddr = paddrs[i * nrxqs];
/* Error path: release any partially-built queue state. */
1077 iavf_if_queues_free(ctx);
/*
 * ifdi_queues_free: release the per-ring report-status arrays (only
 * present in descriptor-writeback mode) and then the TX/RX queue
 * arrays themselves. Pointers are NULLed so a repeat call is safe.
 */
604 iavf_if_queues_free(if_ctx_t ctx)
1084 struct iavf_sc *sc = iflib_get_softc(ctx);
1085 struct ixl_vsi *vsi = &sc->vsi;
1087 if (!vsi->enable_head_writeback) {
1088 struct ixl_tx_queue *que;
1091 for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
1092 struct tx_ring *txr = &que->txr;
1093 if (txr->tx_rsq != NULL) {
1094 free(txr->tx_rsq, M_IAVF);
1100 if (vsi->tx_queues != NULL) {
1101 free(vsi->tx_queues, M_IAVF);
1102 vsi->tx_queues = NULL;
1104 if (vsi->rx_queues != NULL) {
1105 free(vsi->rx_queues, M_IAVF);
1106 vsi->rx_queues = NULL;
/*
 * Check the ARQ/ASQ length registers for VF, overflow, and critical
 * error bits; log and clear any that are set. If an error was seen,
 * mark the VF as needing reset, request one from the PF, and return
 * EIO so callers stop processing the admin queue.
 */
619 iavf_check_aq_errors(struct iavf_sc *sc)
1113 struct i40e_hw *hw = &sc->hw;
1114 device_t dev = sc->dev;
1116 u8 aq_error = false;
1118 /* check for Admin queue errors */
1119 oldreg = reg = rd32(hw, hw->aq.arq.len);
1120 if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
1121 device_printf(dev, "ARQ VF Error detected\n");
1122 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
1125 if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
1126 device_printf(dev, "ARQ Overflow Error detected\n");
1127 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
1130 if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
1131 device_printf(dev, "ARQ Critical Error detected\n");
1132 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
/* Write back only if a bit was cleared (guard likely elided here). */
1136 wr32(hw, hw->aq.arq.len, reg);
1138 oldreg = reg = rd32(hw, hw->aq.asq.len);
1139 if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
1140 device_printf(dev, "ASQ VF Error detected\n");
1141 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
1144 if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
1145 device_printf(dev, "ASQ Overflow Error detected\n");
1146 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
1149 if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
1150 device_printf(dev, "ASQ Critical Error detected\n");
1151 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
1155 wr32(hw, hw->aq.asq.len, reg);
1158 device_printf(dev, "WARNING: Stopping VF!\n");
1160 * A VF reset might not be enough to fix a problem here;
1161 * a PF reset could be required.
1163 sc->init_state = IAVF_RESET_REQUIRED;
1165 iavf_request_reset(sc);
1168 return (aq_error ? EIO : 0);
/*
 * Drain the admin receive queue: after checking for AQ errors, clean
 * up to IXL_ADM_LIMIT events, dispatching each virtchnl message to
 * iavf_vc_completion(), then re-arm the admin queue interrupt cause.
 * Returns the last i40e status from the clean loop (or a critical-
 * error status if iavf_check_aq_errors failed).
 */
652 static enum i40e_status_code
653 iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
1174 enum i40e_status_code status = I40E_SUCCESS;
1175 struct i40e_arq_event_info event;
1176 struct i40e_hw *hw = &sc->hw;
1177 struct virtchnl_msg *v_msg;
1178 int error = 0, loop = 0;
1181 error = iavf_check_aq_errors(sc);
1183 return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR);
/* Reuse the persistent AQ buffer; zeroed before each receive. */
1185 event.buf_len = IXL_AQ_BUF_SZ;
1186 event.msg_buf = sc->aq_buffer;
1187 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1188 v_msg = (struct virtchnl_msg *)&event.desc;
1190 /* clean and process any events */
1192 status = i40e_clean_arq_element(hw, &event, pending);
1194 * Also covers normal case when i40e_clean_arq_element()
1195 * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK"
1199 iavf_vc_completion(sc, v_msg->v_opcode,
1200 v_msg->v_retval, event.msg_buf, event.msg_len);
1201 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1202 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1204 /* Re-enable admin queue interrupt cause */
1205 reg = rd32(hw, I40E_VFINT_ICR0_ENA1);
1206 reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK;
1207 wr32(hw, I40E_VFINT_ICR0_ENA1, reg);
/*
 * iavf_if_update_admin_status - iflib admin task handler.
 *
 * Processes pending AdminQ events and refreshes the OS link state.
 * If events remain, reschedules itself via iflib; otherwise re-arms
 * the AdminQ interrupt.
 */
1213 iavf_if_update_admin_status(if_ctx_t ctx)
1215 struct iavf_sc *sc = iflib_get_softc(ctx);
1216 struct i40e_hw *hw = &sc->hw;
1219 iavf_process_adminq(sc, &pending);
1220 iavf_update_link_status(sc);
1223 * If there are still messages to process, reschedule.
1224 * Otherwise, re-enable the Admin Queue interrupt.
1227 iflib_admin_intr_deferred(ctx);
1229 iavf_enable_adminq_irq(hw);
/*
 * iavf_mc_filter_apply - Per-address callback for if_multi_apply().
 *
 * Skips non-link-layer addresses and adds a MAC filter for each
 * multicast address.  Return-value convention is not visible in this
 * listing — presumably non-zero counts the filter as applied; confirm
 * against if_multi_apply() in the caller.
 */
1233 iavf_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
1235 struct iavf_sc *sc = arg;
1238 if (ifma->ifma_addr->sa_family != AF_LINK)
1240 error = iavf_add_mac_filter(sc,
1241 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
/*
 * iavf_if_multi_set - iflib callback: program multicast filters.
 *
 * If the interface has MAX_MULTICAST_ADDR or more addresses, falls back
 * to multicast-promiscuous mode instead of individual filters.
 * Otherwise deletes existing MC filters and re-adds one per address,
 * then notifies the PF via a virtchnl message.
 */
1248 iavf_if_multi_set(if_ctx_t ctx)
1250 struct iavf_sc *sc = iflib_get_softc(ctx);
1253 IOCTL_DEBUGOUT("iavf_if_multi_set: begin");
1255 mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
1256 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1257 /* Delete MC filters and enable mulitcast promisc instead */
1258 iavf_init_multi(sc);
1259 sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
1260 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
1264 /* If there aren't too many filters, delete existing MC filters */
1265 iavf_init_multi(sc);
1267 /* And (re-)install filters for all mcast addresses */
1268 mcnt = if_multi_apply(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc);
1271 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
/*
 * iavf_if_mtu_set - iflib callback: validate and apply a new MTU.
 *
 * Rejects MTUs whose resulting frame (MTU + Ethernet header + CRC +
 * VLAN tag) would exceed IXL_MAX_FRAME; otherwise records the new
 * maximum frame size in the shared iflib softc.
 */
1275 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1277 struct iavf_sc *sc = iflib_get_softc(ctx);
1278 struct ixl_vsi *vsi = &sc->vsi;
1280 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1281 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1282 ETHER_VLAN_ENCAP_LEN)
/* Frame size = payload MTU plus L2 header, CRC and one VLAN tag. */
1285 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1286 ETHER_VLAN_ENCAP_LEN;
/*
 * iavf_if_media_status - iflib callback: report media/link status.
 *
 * Refreshes link state, then fills ifmr with IFM_AVALID/IFM_ETHER and,
 * when active, a PHY type guessed from the PF-reported link speed.
 */
1292 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1295 struct ifnet *ifp = iflib_get_ifp(ctx);
1297 struct iavf_sc *sc = iflib_get_softc(ctx);
1299 INIT_DBG_IF(ifp, "begin");
1301 iavf_update_link_status(sc);
1303 ifmr->ifm_status = IFM_AVALID;
1304 ifmr->ifm_active = IFM_ETHER;
1309 ifmr->ifm_status |= IFM_ACTIVE;
1310 /* Hardware is always full-duplex */
1311 ifmr->ifm_active |= IFM_FDX;
1313 /* Based on the link speed reported by the PF over the AdminQ, choose a
1314 * PHY type to report. This isn't 100% correct since we don't really
1315 * know the underlying PHY type of the PF, but at least we can report
1316 * a valid link speed...
1318 switch (sc->link_speed) {
1319 case VIRTCHNL_LINK_SPEED_100MB:
1320 ifmr->ifm_active |= IFM_100_TX;
1322 case VIRTCHNL_LINK_SPEED_1GB:
1323 ifmr->ifm_active |= IFM_1000_T;
1325 case VIRTCHNL_LINK_SPEED_10GB:
1326 ifmr->ifm_active |= IFM_10G_SR;
/* 20GB has no exact ifmedia type; report 25G for both. */
1328 case VIRTCHNL_LINK_SPEED_20GB:
1329 case VIRTCHNL_LINK_SPEED_25GB:
1330 ifmr->ifm_active |= IFM_25G_SR;
1332 case VIRTCHNL_LINK_SPEED_40GB:
1333 ifmr->ifm_active |= IFM_40G_SR4;
1336 ifmr->ifm_active |= IFM_UNKNOWN;
1340 INIT_DBG_IF(ifp, "end");
/*
 * iavf_if_media_change - iflib callback: media change request.
 *
 * A VF cannot change media; only validates the request is for Ethernet
 * and logs that media changes are unsupported.
 */
1344 iavf_if_media_change(if_ctx_t ctx)
1346 struct ifmedia *ifm = iflib_get_media(ctx);
1348 INIT_DEBUGOUT("ixl_media_change: begin");
1350 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1353 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
/*
 * iavf_if_promisc_set - iflib callback: configure promiscuous modes.
 *
 * Translates IFF_ALLMULTI / IFF_PROMISC (or a full multicast table)
 * into VF promiscuous flags and sends them to the PF over virtchnl.
 */
1358 iavf_if_promisc_set(if_ctx_t ctx, int flags)
1360 struct iavf_sc *sc = iflib_get_softc(ctx);
1361 struct ifnet *ifp = iflib_get_ifp(ctx);
1363 sc->promisc_flags = 0;
/* A saturated multicast table also forces multicast-promisc. */
1365 if (flags & IFF_ALLMULTI ||
1366 if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
1367 sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
1368 if (flags & IFF_PROMISC)
1369 sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;
1371 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
/*
 * iavf_if_timer - iflib periodic timer callback.
 *
 * Detects a PF-initiated VF reset by polling VFGEN_RSTAT, kicks the
 * admin task, and requests updated statistics from the PF.
 */
1377 iavf_if_timer(if_ctx_t ctx, uint16_t qid)
1379 struct iavf_sc *sc = iflib_get_softc(ctx);
1380 struct i40e_hw *hw = &sc->hw;
1386 /* Check for when PF triggers a VF reset */
1387 val = rd32(hw, I40E_VFGEN_RSTAT) &
1388 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1389 if (val != VIRTCHNL_VFR_VFACTIVE
1390 && val != VIRTCHNL_VFR_COMPLETED) {
1391 iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
1395 /* Fire off the adminq task */
1396 iflib_admin_intr_deferred(ctx);
1399 iavf_request_stats(sc);
/*
 * iavf_if_vlan_register - iflib callback: add a VLAN filter.
 *
 * Validates the tag (1-4094/4095 range check below), queues a new
 * filter marked IXL_FILTER_ADD on the softc's VLAN list, and asks the
 * PF to install it.  M_WAITOK: may sleep; must not be called from an
 * interrupt context.
 */
1403 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
1405 struct iavf_sc *sc = iflib_get_softc(ctx);
1406 struct ixl_vsi *vsi = &sc->vsi;
1407 struct iavf_vlan_filter *v;
1409 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1413 v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO);
1414 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1416 v->flags = IXL_FILTER_ADD;
1418 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
/*
 * iavf_if_vlan_unregister - iflib callback: remove a VLAN filter.
 *
 * Marks the matching list entry IXL_FILTER_DEL and tells the PF to
 * delete it; the entry itself is presumably freed later by the
 * virtchnl completion path — confirm in iavf_vc_completion().
 */
1422 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1424 struct iavf_sc *sc = iflib_get_softc(ctx);
1425 struct ixl_vsi *vsi = &sc->vsi;
1426 struct iavf_vlan_filter *v;
1429 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1432 SLIST_FOREACH(v, sc->vlan_filters, next) {
1433 if (v->vlan == vtag) {
1434 v->flags = IXL_FILTER_DEL;
1440 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
/*
 * iavf_if_get_counter - iflib callback: return an interface counter.
 *
 * Maps each ift_counter to the corresponding cached VSI statistic;
 * anything unhandled falls through to if_get_counter_default().
 */
1444 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1446 struct iavf_sc *sc = iflib_get_softc(ctx);
1447 struct ixl_vsi *vsi = &sc->vsi;
1448 if_t ifp = iflib_get_ifp(ctx);
1451 case IFCOUNTER_IPACKETS:
1452 return (vsi->ipackets);
1453 case IFCOUNTER_IERRORS:
1454 return (vsi->ierrors);
1455 case IFCOUNTER_OPACKETS:
1456 return (vsi->opackets);
1457 case IFCOUNTER_OERRORS:
1458 return (vsi->oerrors);
1459 case IFCOUNTER_COLLISIONS:
1460 /* Collisions are by standard impossible in 40G/10G Ethernet */
1462 case IFCOUNTER_IBYTES:
1463 return (vsi->ibytes);
1464 case IFCOUNTER_OBYTES:
1465 return (vsi->obytes);
1466 case IFCOUNTER_IMCASTS:
1467 return (vsi->imcasts);
1468 case IFCOUNTER_OMCASTS:
1469 return (vsi->omcasts);
1470 case IFCOUNTER_IQDROPS:
1471 return (vsi->iqdrops);
1472 case IFCOUNTER_OQDROPS:
1473 return (vsi->oqdrops);
1474 case IFCOUNTER_NOPROTO:
1475 return (vsi->noproto);
1477 return (if_get_counter_default(ifp, cnt));
/*
 * iavf_free_pci_resources - Release interrupts and the BAR0 mapping.
 *
 * Safe to call before queues exist (early-attach failure path); frees
 * the admin IRQ, all per-RX-queue IRQs, then the PCI memory resource.
 */
1483 iavf_free_pci_resources(struct iavf_sc *sc)
1485 struct ixl_vsi *vsi = &sc->vsi;
1486 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1487 device_t dev = sc->dev;
1489 /* We may get here before stations are setup */
1493 /* Release all interrupts */
1494 iflib_irq_free(vsi->ctx, &vsi->irq);
1496 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1497 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
1500 if (sc->pci_mem != NULL)
1501 bus_release_resource(dev, SYS_RES_MEMORY,
1502 PCIR_BAR(0), sc->pci_mem);
/*
** Requests a VF reset from the PF.
**
** Requires the VF's Admin Queue to be initialized.
**
** Waits for the reset to complete, re-enables PCI bus mastering, then
** cycles the Admin Queue (shutdown + re-init) and re-arms its IRQ.
** Error paths print diagnostics; return handling is not fully visible
** in this listing.
*/
1512 iavf_reset(struct iavf_sc *sc)
1514 struct i40e_hw *hw = &sc->hw;
1515 device_t dev = sc->dev;
1518 /* Ask the PF to reset us if we are initiating */
1519 if (sc->init_state != IAVF_RESET_PENDING)
1520 iavf_request_reset(sc);
/* Give the PF time to start the reset before polling for completion. */
1522 i40e_msec_pause(100);
1523 error = iavf_reset_complete(hw);
1525 device_printf(dev, "%s: VF reset failed\n",
/* Reset clears bus mastering; restore it before touching the AQ. */
1529 pci_enable_busmaster(dev);
1531 error = i40e_shutdown_adminq(hw);
1533 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1538 error = i40e_init_adminq(hw);
1540 device_printf(dev, "%s: init_adminq failed: %d\n",
1545 iavf_enable_adminq_irq(hw);
/*
 * iavf_reset_complete - Poll VFGEN_RSTAT until the VF reset finishes.
 *
 * Polls up to 100 times with 100 ms pauses (~10 s total) for the
 * VFR state to become VFACTIVE or COMPLETED.
 */
1550 iavf_reset_complete(struct i40e_hw *hw)
1554 /* Wait up to ~10 seconds */
1555 for (int i = 0; i < 100; i++) {
1556 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1557 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1559 if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
1560 (reg == VIRTCHNL_VFR_COMPLETED))
1562 i40e_msec_pause(100);
/*
 * iavf_setup_interface - Finish ifnet setup after attach.
 *
 * Computes the initial max frame size from the current MTU, sets a
 * nominal 40 Gb/s baudrate (the VF cannot know the real PF link), and
 * registers a single autoselect media entry.
 */
1569 iavf_setup_interface(device_t dev, struct iavf_sc *sc)
1571 struct ixl_vsi *vsi = &sc->vsi;
1572 if_ctx_t ctx = vsi->ctx;
1573 struct ifnet *ifp = iflib_get_ifp(ctx);
1575 INIT_DBG_DEV(dev, "begin");
1577 vsi->shared->isc_max_frame_size =
1578 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1579 + ETHER_VLAN_ENCAP_LEN;
/* Pre-1100000 FreeBSD used a different baudrate-setting API. */
1580 #if __FreeBSD_version >= 1100000
1581 if_setbaudrate(ifp, IF_Gbps(40));
1583 if_initbaudrate(ifp, IF_Gbps(40));
1586 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1587 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
/*
** Get a new filter and add it to the mac filter list.
**
** Allocates with M_NOWAIT (may be called from contexts that cannot
** sleep) and zeroes the entry; on success the filter is already linked
** into sc->mac_filters.  Caller must handle a NULL return.
*/
static struct iavf_mac_filter *
1594 iavf_get_mac_filter(struct iavf_sc *sc)
1596 struct iavf_mac_filter *f;
1598 f = malloc(sizeof(struct iavf_mac_filter),
1599 M_IAVF, M_NOWAIT | M_ZERO);
1601 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
/*
** Find the filter with matching MAC address
**
** Linear scan of sc->mac_filters; returns the matching entry, or
** presumably NULL when none matches (the return lines are not visible
** in this listing — confirm).
*/
static struct iavf_mac_filter *
1610 iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
1612 struct iavf_mac_filter *f;
1615 SLIST_FOREACH(f, sc->mac_filters, next) {
1616 if (cmp_etheraddr(f->macaddr, macaddr)) {
/*
** Admin Queue interrupt handler
**
** Reads and acks ICR0, masks the AdminQ cause so it is not re-raised
** until the admin task has processed it, then either schedules the
** iflib admin thread (FILTER_SCHEDULE_THREAD) or reports the
** interrupt handled (FILTER_HANDLED).
*/
1631 iavf_msix_adminq(void *arg)
1633 struct iavf_sc *sc = arg;
1634 struct i40e_hw *hw = &sc->hw;
1636 bool do_task = FALSE;
1640 reg = rd32(hw, I40E_VFINT_ICR01);
1642 * For masking off interrupt causes that need to be handled before
1643 * they can be re-enabled
1645 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1647 /* Check on the cause */
1648 if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) {
1649 mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK;
1653 wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1654 iavf_enable_adminq_irq(hw);
1657 return (FILTER_SCHEDULE_THREAD);
1659 return (FILTER_HANDLED);
/*
 * iavf_enable_intr - Enable the AdminQ IRQ and every RX queue IRQ.
 */
1663 iavf_enable_intr(struct ixl_vsi *vsi)
1665 struct i40e_hw *hw = vsi->hw;
1666 struct ixl_rx_queue *que = vsi->rx_queues;
1668 iavf_enable_adminq_irq(hw);
1669 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1670 iavf_enable_queue_irq(hw, que->rxr.me);
/*
 * iavf_disable_intr - Disable every RX queue IRQ on the VSI.
 * (The AdminQ IRQ is handled separately by iavf_disable_adminq_irq().)
 */
1674 iavf_disable_intr(struct ixl_vsi *vsi)
1676 struct i40e_hw *hw = vsi->hw;
1677 struct ixl_rx_queue *que = vsi->rx_queues;
1679 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1680 iavf_disable_queue_irq(hw, que->rxr.me);
/*
 * iavf_disable_adminq_irq - Mask the AdminQ interrupt.
 *
 * Clears DYN_CTL01 and the ICR0 enable mask; the trailing read of
 * VFGEN_RSTAT flushes the posted writes.
 */
1684 iavf_disable_adminq_irq(struct i40e_hw *hw)
1686 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1687 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1689 rd32(hw, I40E_VFGEN_RSTAT);
/*
 * iavf_enable_adminq_irq - Unmask the AdminQ interrupt.
 *
 * Sets INTENA with no ITR index and re-enables the AdminQ cause in
 * ICR0; the trailing register read flushes the posted writes.
 */
1693 iavf_enable_adminq_irq(struct i40e_hw *hw)
1695 wr32(hw, I40E_VFINT_DYN_CTL01,
1696 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1697 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1698 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1700 rd32(hw, I40E_VFGEN_RSTAT);
/*
 * iavf_enable_queue_irq - Unmask one queue's MSI-X vector.
 *
 * Enables the interrupt, clears any pending-bit-array entry, and
 * selects "no ITR" for the write.
 */
1704 iavf_enable_queue_irq(struct i40e_hw *hw, int id)
1708 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1709 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1710 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1711 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
/*
 * iavf_disable_queue_irq - Mask one queue's MSI-X vector.
 *
 * Writes DYN_CTLN1 with INTENA clear; the VFGEN_RSTAT read flushes
 * the posted write.
 */
1715 iavf_disable_queue_irq(struct i40e_hw *hw, int id)
1717 wr32(hw, I40E_VFINT_DYN_CTLN1(id),
1718 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1719 rd32(hw, I40E_VFGEN_RSTAT);
/*
 * iavf_configure_tx_itr - Program the TX ITR for every TX queue.
 *
 * Copies the softc's tunable tx_itr into the VSI, writes it to each
 * queue's ITR register, and caches it on the ring.
 */
1723 iavf_configure_tx_itr(struct iavf_sc *sc)
1725 struct i40e_hw *hw = &sc->hw;
1726 struct ixl_vsi *vsi = &sc->vsi;
1727 struct ixl_tx_queue *que = vsi->tx_queues;
1729 vsi->tx_itr_setting = sc->tx_itr;
1731 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
1732 struct tx_ring *txr = &que->txr;
1734 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
1735 vsi->tx_itr_setting);
1736 txr->itr = vsi->tx_itr_setting;
1737 txr->latency = IXL_AVE_LATENCY;
/*
 * iavf_configure_rx_itr - Program the RX ITR for every RX queue.
 *
 * Mirror of iavf_configure_tx_itr() for the receive side.
 */
1742 iavf_configure_rx_itr(struct iavf_sc *sc)
1744 struct i40e_hw *hw = &sc->hw;
1745 struct ixl_vsi *vsi = &sc->vsi;
1746 struct ixl_rx_queue *que = vsi->rx_queues;
1748 vsi->rx_itr_setting = sc->rx_itr;
1750 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
1751 struct rx_ring *rxr = &que->rxr;
1753 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
1754 vsi->rx_itr_setting);
1755 rxr->itr = vsi->rx_itr_setting;
1756 rxr->latency = IXL_AVE_LATENCY;
/*
 * Get initial ITR values from tunable values.
 *
 * Convenience wrapper: programs both TX and RX ITR settings.
 */
1764 iavf_configure_itr(struct iavf_sc *sc)
1766 iavf_configure_tx_itr(sc);
1767 iavf_configure_rx_itr(sc);
/*
** Provide a update to the queue RX
** interrupt moderation value.
**
** No-op for idle queues; otherwise re-writes the queue's ITR register
** only when the cached ring value differs from the VSI setting.
*/
1775 iavf_set_queue_rx_itr(struct ixl_rx_queue *que)
1777 struct ixl_vsi *vsi = que->vsi;
1778 struct i40e_hw *hw = vsi->hw;
1779 struct rx_ring *rxr = &que->rxr;
1781 /* Idle, do nothing */
1782 if (rxr->bytes == 0)
1785 /* Update the hardware if needed */
1786 if (rxr->itr != vsi->rx_itr_setting) {
1787 rxr->itr = vsi->rx_itr_setting;
1788 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1789 que->rxr.me), rxr->itr);
/*
 * iavf_msix_que - Per-queue MSI-X interrupt handler.
 *
 * Updates RX interrupt moderation and defers packet processing to the
 * iflib queue thread.
 */
1794 iavf_msix_que(void *arg)
1796 struct ixl_rx_queue *rx_que = arg;
1800 iavf_set_queue_rx_itr(rx_que);
1801 // iavf_set_queue_tx_itr(que);
1803 return (FILTER_SCHEDULE_THREAD);
1806 /*********************************************************************
1807 * Multicast Initialization
1809 * This routine is called by init to reset a fresh state.
1811 **********************************************************************/
1813 iavf_init_multi(struct iavf_sc *sc)
1815 struct iavf_mac_filter *f;
1818 /* First clear any multicast filters */
1819 SLIST_FOREACH(f, sc->mac_filters, next) {
1820 if ((f->flags & IXL_FILTER_USED)
1821 && (f->flags & IXL_FILTER_MC)) {
1822 f->flags |= IXL_FILTER_DEL;
1827 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
/*
** Note: this routine updates the OS on the link state
** the real check of the hardware only happens with
** a link interrupt.
**
** Edge-triggered: only calls iflib_link_state_change() when the cached
** vsi->link_active flag actually flips.  Baudrate is derived from the
** PF-reported virtchnl link speed.
*/
1836 iavf_update_link_status(struct iavf_sc *sc)
1838 struct ixl_vsi *vsi = &sc->vsi;
1842 if (vsi->link_active == FALSE) {
1843 vsi->link_active = TRUE;
1844 baudrate = ixl_max_vc_speed_to_value(sc->link_speed);
1845 iavf_dbg_info(sc, "baudrate: %lu\n", baudrate);
1846 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1848 } else { /* Link down */
1849 if (vsi->link_active == TRUE) {
1850 vsi->link_active = FALSE;
1851 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1856 /*********************************************************************
1858 * This routine disables all traffic on the adapter by issuing a
1859 * global reset on the MAC and deallocates TX/RX buffers.
1861 **********************************************************************/
1864 iavf_stop(struct iavf_sc *sc)
1870 iavf_disable_intr(&sc->vsi);
1872 if (atomic_load_acq_32(&sc->queues_enabled))
1873 iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
/*
 * iavf_if_stop - iflib stop callback; thin wrapper (presumably calls
 * iavf_stop(sc) — the body line is not visible in this listing).
 */
1877 iavf_if_stop(if_ctx_t ctx)
1879 struct iavf_sc *sc = iflib_get_softc(ctx);
/*
 * iavf_config_rss_reg - Configure RSS directly through VF registers.
 *
 * Used when the PF grants VIRTCHNL_VF_OFFLOAD_RSS_REG.  Writes the
 * hash key (from the kernel RSS subsystem when compiled in, else the
 * driver default), the enabled-hash (HENA) PCTYPE bits, and a
 * round-robin (or RSS-bucket-mapped) lookup table.  Single-queue VSIs
 * get RSS disabled entirely by zeroing HENA.
 */
1885 iavf_config_rss_reg(struct iavf_sc *sc)
1887 struct i40e_hw *hw = &sc->hw;
1888 struct ixl_vsi *vsi = &sc->vsi;
1890 u64 set_hena = 0, hena;
1892 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1894 u32 rss_hash_config;
1897 /* Don't set up RSS if using a single queue */
1898 if (vsi->num_rx_queues == 1) {
1899 wr32(hw, I40E_VFQF_HENA(0), 0);
1900 wr32(hw, I40E_VFQF_HENA(1), 0);
1906 /* Fetch the configured RSS key */
1907 rss_getkey((uint8_t *) &rss_seed);
/* Non-RSS-kernel fallback: use the driver's default key. */
1909 ixl_get_default_rss_key(rss_seed);
1912 /* Fill out hash function seed */
1913 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1914 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
1916 /* Enable PCTYPES for RSS: */
1918 rss_hash_config = rss_gethashconfig();
1919 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1920 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1921 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1922 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1923 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1924 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1925 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1926 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1927 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1928 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1929 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1930 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1931 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1932 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Non-RSS-kernel fallback: enable the default XL710 PCTYPE set. */
1934 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* HENA is a 64-bit value split across two 32-bit registers. */
1936 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
1937 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
1939 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
1940 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
1942 /* Populate the LUT with max no. of queues in round robin fashion */
1943 for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
1944 if (j == vsi->num_rx_queues)
1948 * Fetch the RSS bucket id for the given indirection entry.
1949 * Cap it at the number of configured buckets (which is
1952 que_id = rss_get_indirection_to_bucket(i);
1953 que_id = que_id % vsi->num_queues;
1957 /* lut = 4-byte sliding window of 4 lut entries */
1958 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
1959 /* On i = 3, we have 4 entries in lut; write to the register */
1961 wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
1962 DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
/*
 * iavf_config_rss_pf - Configure RSS via virtchnl messages to the PF.
 *
 * Used when the PF handles RSS on our behalf: sends key, HENA, and
 * LUT configuration requests.
 */
1969 iavf_config_rss_pf(struct iavf_sc *sc)
1971 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);
1973 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);
1975 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
/*
** iavf_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
**
** Dispatches to register-based or PF-message-based configuration
** depending on the capability flags the PF advertised; logs an error
** if the PF offered neither method.
*/
1984 iavf_config_rss(struct iavf_sc *sc)
1986 if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
1987 iavf_dbg_info(sc, "Setting up RSS using VF registers...");
1988 iavf_config_rss_reg(sc);
1989 } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1990 iavf_dbg_info(sc, "Setting up RSS using messages to PF...");
1991 iavf_config_rss_pf(sc);
1993 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
/*
** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message.
**
** Returns early if the address already has a filter; otherwise
** allocates a new entry, copies the address in, and marks it
** ADD|USED (plus the caller-supplied flags — the line applying
** `flags` is not visible in this listing).
*/
2002 iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
2004 struct iavf_mac_filter *f;
2006 /* Does one already exist? */
2007 f = iavf_find_mac_filter(sc, macaddr);
2009 iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
2010 MAC_FORMAT_ARGS(macaddr));
2014 /* If not, get a new empty filter */
2015 f = iavf_get_mac_filter(sc);
2017 device_printf(sc->dev, "%s: no filters available!!\n",
2022 iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
2023 MAC_FORMAT_ARGS(macaddr));
2025 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2026 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
/*
** Marks a MAC filter for deletion.
**
** Looks up the address on the softc list and sets IXL_FILTER_DEL;
** the actual hardware removal happens later via virtchnl.
*/
2035 iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
2037 struct iavf_mac_filter *f;
2039 f = iavf_find_mac_filter(sc, macaddr);
2043 f->flags |= IXL_FILTER_DEL;
/*
 * Re-uses the name from the PF driver.
 *
 * iavf_add_device_sysctls - Register the device's sysctl tree:
 * current_speed and tx/rx ITR knobs at the top level, a hidden
 * (CTLFLAG_SKIP) "debug" subtree for diagnostics, and the shared
 * VSI/queue statistics nodes.
 */
2051 iavf_add_device_sysctls(struct iavf_sc *sc)
2053 struct ixl_vsi *vsi = &sc->vsi;
2054 device_t dev = sc->dev;
2056 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2057 struct sysctl_oid_list *ctx_list =
2058 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2059 struct sysctl_oid *debug_node;
2060 struct sysctl_oid_list *debug_list;
2062 SYSCTL_ADD_PROC(ctx, ctx_list,
2063 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
2064 sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");
2066 SYSCTL_ADD_PROC(ctx, ctx_list,
2067 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
2068 sc, 0, iavf_sysctl_tx_itr, "I",
2069 "Immediately set TX ITR value for all queues");
2071 SYSCTL_ADD_PROC(ctx, ctx_list,
2072 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
2073 sc, 0, iavf_sysctl_rx_itr, "I",
2074 "Immediately set RX ITR value for all queues");
2076 /* Add sysctls meant to print debug information, but don't list them
2077 * in "sysctl -a" output. */
2078 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2079 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
2080 debug_list = SYSCTL_CHILDREN(debug_node);
2082 SYSCTL_ADD_UINT(ctx, debug_list,
2083 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2084 &sc->hw.debug_mask, 0, "Shared code debug message level");
2086 SYSCTL_ADD_UINT(ctx, debug_list,
2087 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2088 &sc->dbg_mask, 0, "Non-shared code debug message level");
2090 SYSCTL_ADD_PROC(ctx, debug_list,
2091 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
2092 sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");
2094 SYSCTL_ADD_PROC(ctx, debug_list,
2095 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
2096 sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2098 SYSCTL_ADD_PROC(ctx, debug_list,
2099 OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
2100 sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");
2102 SYSCTL_ADD_PROC(ctx, debug_list,
2103 OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
2104 sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
2106 /* Add stats sysctls */
2107 ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
2108 ixl_add_queues_sysctls(dev, vsi);
/*
 * iavf_init_filters - Allocate and initialize the MAC and VLAN filter
 * list heads.  M_WAITOK: may sleep; called from attach context.
 */
2113 iavf_init_filters(struct iavf_sc *sc)
2115 sc->mac_filters = malloc(sizeof(struct mac_list),
2116 M_IAVF, M_WAITOK | M_ZERO);
2117 SLIST_INIT(sc->mac_filters);
2118 sc->vlan_filters = malloc(sizeof(struct vlan_list),
2119 M_IAVF, M_WAITOK | M_ZERO);
2120 SLIST_INIT(sc->vlan_filters);
/*
 * iavf_free_filters - Tear down both filter lists.
 *
 * Drains each SLIST (the free() of individual entries is presumably on
 * the elided lines of each loop) and then frees the list heads
 * allocated by iavf_init_filters().
 */
2124 iavf_free_filters(struct iavf_sc *sc)
2126 struct iavf_mac_filter *f;
2127 struct iavf_vlan_filter *v;
2129 while (!SLIST_EMPTY(sc->mac_filters)) {
2130 f = SLIST_FIRST(sc->mac_filters);
2131 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2134 free(sc->mac_filters, M_IAVF);
2135 while (!SLIST_EMPTY(sc->vlan_filters)) {
2136 v = SLIST_FIRST(sc->vlan_filters);
2137 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2140 free(sc->vlan_filters, M_IAVF);
/*
 * iavf_vc_speed_to_string - Map a virtchnl link speed enum to a
 * human-readable string.
 *
 * Indexes into a static `speeds` table (declared on lines not visible
 * in this listing); unknown values presumably fall through to an
 * "Unknown" entry — confirm against the table definition.
 */
2144 iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
2158 switch (link_speed) {
2159 case VIRTCHNL_LINK_SPEED_100MB:
2162 case VIRTCHNL_LINK_SPEED_1GB:
2165 case VIRTCHNL_LINK_SPEED_10GB:
2168 case VIRTCHNL_LINK_SPEED_40GB:
2171 case VIRTCHNL_LINK_SPEED_20GB:
2174 case VIRTCHNL_LINK_SPEED_25GB:
2177 case VIRTCHNL_LINK_SPEED_UNKNOWN:
2183 return speeds[index];
/*
 * iavf_sysctl_current_speed - Read-only sysctl handler reporting the
 * PF-advertised link speed as a string.
 */
2187 iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2189 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2192 error = sysctl_handle_string(oidp,
2193 iavf_vc_speed_to_string(sc->link_speed),
/*
 * Sanity check and save off tunable values.
 *
 * Copies the loader-tunable debug masks, head-writeback setting, and
 * TX/RX ITR values into the softc.  Out-of-range ITR values are
 * rejected with a warning and replaced by defaults (ITR_4K for TX,
 * ITR_8K for RX).
 */
2202 iavf_save_tunables(struct iavf_sc *sc)
2204 device_t dev = sc->dev;
2206 /* Save tunable information */
2207 sc->dbg_mask = iavf_core_debug_mask;
2208 sc->hw.debug_mask = iavf_shared_debug_mask;
2209 sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);
2211 if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) {
2212 device_printf(dev, "Invalid tx_itr value of %d set!\n",
2214 device_printf(dev, "tx_itr must be between %d and %d, "
2217 device_printf(dev, "Using default value of %d instead\n",
2219 sc->tx_itr = IXL_ITR_4K;
2221 sc->tx_itr = iavf_tx_itr;
2223 if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) {
2224 device_printf(dev, "Invalid rx_itr value of %d set!\n",
2226 device_printf(dev, "rx_itr must be between %d and %d, "
2229 device_printf(dev, "Using default value of %d instead\n",
2231 sc->rx_itr = IXL_ITR_8K;
2233 sc->rx_itr = iavf_rx_itr;
/*
 * Used to set the Tx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 *
 * Sysctl handler: validates the new value against [0, IXL_MAX_ITR],
 * stores it in the softc, and reprograms every TX queue.
 */
2241 iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
2243 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2244 device_t dev = sc->dev;
2245 int requested_tx_itr;
2248 requested_tx_itr = sc->tx_itr;
2249 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
/* Read-only access (no new value) returns here. */
2250 if ((error) || (req->newptr == NULL))
2252 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2254 "Invalid TX itr value; value must be between 0 and %d\n",
2259 sc->tx_itr = requested_tx_itr;
2260 iavf_configure_tx_itr(sc);
/*
 * Used to set the Rx ITR value for all of the VF's queues.
 * Writes to the ITR registers immediately.
 *
 * Mirror of iavf_sysctl_tx_itr() for the receive side.
 */
2270 iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
2272 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2273 device_t dev = sc->dev;
2274 int requested_rx_itr;
2277 requested_rx_itr = sc->rx_itr;
2278 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2279 if ((error) || (req->newptr == NULL))
2281 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2283 "Invalid RX itr value; value must be between 0 and %d\n",
2288 sc->rx_itr = requested_rx_itr;
2289 iavf_configure_rx_itr(sc);
/*
 * iavf_sysctl_sw_filter_list - Debug sysctl: dump the software MAC and
 * VLAN filter lists into an sbuf for the user.
 */
2295 iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
2297 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2298 struct iavf_mac_filter *f;
2299 struct iavf_vlan_filter *v;
2300 device_t dev = sc->dev;
2301 int ftl_len, ftl_counter = 0, error = 0;
2304 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2306 device_printf(dev, "Could not allocate sbuf for output.\n");
2310 sbuf_printf(buf, "\n");
2312 /* Print MAC filters */
2313 sbuf_printf(buf, "MAC Filters:\n");
/* First pass counts entries so "(none)" can be printed for an
 * empty list; second pass formats each filter. */
2315 SLIST_FOREACH(f, sc->mac_filters, next)
2318 sbuf_printf(buf, "(none)\n");
2320 SLIST_FOREACH(f, sc->mac_filters, next) {
2322 MAC_FORMAT ", flags %#06x\n",
2323 MAC_FORMAT_ARGS(f->macaddr), f->flags);
2327 /* Print VLAN filters */
2328 sbuf_printf(buf, "VLAN Filters:\n");
2330 SLIST_FOREACH(v, sc->vlan_filters, next)
2333 sbuf_printf(buf, "(none)");
2335 SLIST_FOREACH(v, sc->vlan_filters, next) {
2339 /* don't print '\n' for last entry */
2340 if (++ftl_counter != ftl_len)
2341 sbuf_printf(buf, "\n");
2345 error = sbuf_finish(buf);
2347 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Print out mapping of TX queue indexes and Rx queue indexes
 * to MSI-X vectors, one "(rxq N): vector" / "(txq N): vector" line
 * per queue.
 */
2358 iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
2360 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2361 struct ixl_vsi *vsi = &sc->vsi;
2362 device_t dev = sc->dev;
2366 struct ixl_rx_queue *rx_que = vsi->rx_queues;
2367 struct ixl_tx_queue *tx_que = vsi->tx_queues;
2369 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2371 device_printf(dev, "Could not allocate sbuf for output.\n");
2375 sbuf_cat(buf, "\n");
2376 for (int i = 0; i < vsi->num_rx_queues; i++) {
2377 rx_que = &vsi->rx_queues[i];
2378 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
2380 for (int i = 0; i < vsi->num_tx_queues; i++) {
2381 tx_que = &vsi->tx_queues[i];
2382 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
2385 error = sbuf_finish(buf);
2387 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* True when the interface is up and running (driver flags say so). */
2393 #define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
/*
 * iavf_sysctl_vf_reset - Write-only debug sysctl: writing 1 requests
 * an iflib-driven reset of the interface (only if it is active).
 */
2395 iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
2397 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2398 int do_reset = 0, error = 0;
2400 error = sysctl_handle_int(oidp, &do_reset, 0, req);
2401 if ((error) || (req->newptr == NULL))
2404 if (do_reset == 1) {
2406 if (CTX_ACTIVE(sc->vsi.ctx))
2407 iflib_request_reset(sc->vsi.ctx);
/*
 * iavf_sysctl_vflr_reset - Write-only debug sysctl: writing 1 performs
 * a PCIe Function Level Reset of the VF, then (if the interface is
 * active) asks iflib to reinitialize it.  Continues past the end of
 * this listing.
 */
2414 iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
2416 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2417 device_t dev = sc->dev;
2418 int do_reset = 0, error = 0;
2420 error = sysctl_handle_int(oidp, &do_reset, 0, req);
2421 if ((error) || (req->newptr == NULL))
2424 if (do_reset == 1) {
/* FLR timeout: max completion timeout in ms, at least 10 ms. */
2425 if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
2426 device_printf(dev, "PCIE FLR failed\n");
2429 else if (CTX_ACTIVE(sc->vsi.ctx))
2430 iflib_request_reset(sc->vsi.ctx);