1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 /*********************************************************************
39 *********************************************************************/
/* Driver version, stringified via __XSTRING() and reported to iflib
 * through isc_driver_version; "-k" marks the in-kernel build. */
40 #define IAVF_DRIVER_VERSION_MAJOR 2
41 #define IAVF_DRIVER_VERSION_MINOR 0
42 #define IAVF_DRIVER_VERSION_BUILD 0
44 #define IAVF_DRIVER_VERSION_STRING \
45 __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "." \
46 __XSTRING(IAVF_DRIVER_VERSION_MINOR) "." \
47 __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k"
49 /*********************************************************************
52 * Used by probe to select devices to load on
54 * ( Vendor ID, Device ID, Branding String )
55 *********************************************************************/
/*
 * PCI IDs this driver attaches to: 700-series VFs, X722 VFs, and the
 * generic Adaptive VF ID. Consumed by iflib probe and MODULE_PNP_INFO;
 * terminated by a required sentinel entry.
 */
57 static pci_vendor_info_t iavf_vendor_info_array[] =
59 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"),
60 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
61 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"),
62 /* required last entry */
66 /*********************************************************************
68 *********************************************************************/
/* iflib ifdi_* entry points (wired up in iavf_if_methods below) */
69 static void *iavf_register(device_t dev);
70 static int iavf_if_attach_pre(if_ctx_t ctx);
71 static int iavf_if_attach_post(if_ctx_t ctx);
72 static int iavf_if_detach(if_ctx_t ctx);
73 static int iavf_if_shutdown(if_ctx_t ctx);
74 static int iavf_if_suspend(if_ctx_t ctx);
75 static int iavf_if_resume(if_ctx_t ctx);
76 static int iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
77 static void iavf_if_enable_intr(if_ctx_t ctx);
78 static void iavf_if_disable_intr(if_ctx_t ctx);
79 static int iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
80 static int iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
81 static int iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
82 static int iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
83 static void iavf_if_queues_free(if_ctx_t ctx);
84 static void iavf_if_update_admin_status(if_ctx_t ctx);
85 static void iavf_if_multi_set(if_ctx_t ctx);
86 static int iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
87 static void iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
88 static int iavf_if_media_change(if_ctx_t ctx);
89 static int iavf_if_promisc_set(if_ctx_t ctx, int flags);
90 static void iavf_if_timer(if_ctx_t ctx, uint16_t qid);
91 static void iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
92 static void iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
93 static uint64_t iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
94 static void iavf_if_stop(if_ctx_t ctx);
95 static bool iavf_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
/* Internal attach/teardown and hardware helpers */
97 static int iavf_allocate_pci_resources(struct iavf_sc *);
98 static int iavf_reset_complete(struct i40e_hw *);
99 static int iavf_setup_vc(struct iavf_sc *);
100 static int iavf_reset(struct iavf_sc *);
101 static int iavf_vf_config(struct iavf_sc *);
102 static void iavf_init_filters(struct iavf_sc *);
103 static void iavf_free_pci_resources(struct iavf_sc *);
104 static void iavf_free_filters(struct iavf_sc *);
105 static void iavf_setup_interface(device_t, struct iavf_sc *);
106 static void iavf_add_device_sysctls(struct iavf_sc *);
107 static void iavf_enable_adminq_irq(struct i40e_hw *);
108 static void iavf_disable_adminq_irq(struct i40e_hw *);
109 static void iavf_enable_queue_irq(struct i40e_hw *, int);
110 static void iavf_disable_queue_irq(struct i40e_hw *, int);
111 static void iavf_config_rss(struct iavf_sc *);
112 static void iavf_stop(struct iavf_sc *);
/* MAC filter management and MSI-X interrupt handlers */
114 static int iavf_add_mac_filter(struct iavf_sc *, u8 *, u16);
115 static int iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
116 static int iavf_msix_que(void *);
117 static int iavf_msix_adminq(void *);
118 //static void iavf_del_multi(struct iavf_sc *sc);
119 static void iavf_init_multi(struct iavf_sc *sc);
120 static void iavf_configure_itr(struct iavf_sc *sc);
/* Per-device sysctl handlers */
122 static int iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
123 static int iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
124 static int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
125 static int iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
126 static int iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
127 static int iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
128 static int iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
/* Tunables and virtchnl (PF<->VF) messaging helpers */
130 static void iavf_save_tunables(struct iavf_sc *);
131 static enum i40e_status_code
132 iavf_process_adminq(struct iavf_sc *, u16 *);
133 static int iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
134 static int iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);
136 /*********************************************************************
137 * FreeBSD Device Interface Entry Points
138 *********************************************************************/
/*
 * Newbus device methods: probe/attach/detach/shutdown are delegated to
 * iflib; only device_register is implemented locally.
 */
140 static device_method_t iavf_methods[] = {
141 /* Device interface */
142 DEVMETHOD(device_register, iavf_register),
143 DEVMETHOD(device_probe, iflib_device_probe),
144 DEVMETHOD(device_attach, iflib_device_attach),
145 DEVMETHOD(device_detach, iflib_device_detach),
146 DEVMETHOD(device_shutdown, iflib_device_shutdown),
150 static driver_t iavf_driver = {
151 "iavf", iavf_methods, sizeof(struct iavf_sc),
154 devclass_t iavf_devclass;
155 DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
/* "- 1" excludes the required terminating sentinel from the PNP table */
156 MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
157 pci, iavf, iavf_vendor_info_array,
158 nitems(iavf_vendor_info_array) - 1);
159 MODULE_VERSION(iavf, 1);
161 MODULE_DEPEND(iavf, pci, 1, 1, 1);
162 MODULE_DEPEND(iavf, ether, 1, 1, 1);
163 MODULE_DEPEND(iavf, iflib, 1, 1, 1);
/* malloc(9) tag for all allocations made by this driver */
165 MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
/*
 * iflib device-interface methods: map the framework's ifdi_* callbacks
 * onto this driver's implementations (prototyped above).
 */
167 static device_method_t iavf_if_methods[] = {
168 DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
169 DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
170 DEVMETHOD(ifdi_detach, iavf_if_detach),
171 DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
172 DEVMETHOD(ifdi_suspend, iavf_if_suspend),
173 DEVMETHOD(ifdi_resume, iavf_if_resume),
174 DEVMETHOD(ifdi_init, iavf_if_init),
175 DEVMETHOD(ifdi_stop, iavf_if_stop),
176 DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
177 DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
178 DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
179 DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
180 DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
181 DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
182 DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
183 DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
184 DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
185 DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
186 DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
187 DEVMETHOD(ifdi_media_status, iavf_if_media_status),
188 DEVMETHOD(ifdi_media_change, iavf_if_media_change),
189 DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
190 DEVMETHOD(ifdi_timer, iavf_if_timer),
191 DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
192 DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
193 DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
194 DEVMETHOD(ifdi_needs_restart, iavf_if_needs_restart),
198 static driver_t iavf_if_driver = {
199 "iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
203 ** TUNEABLE PARAMETERS:
206 static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
207 "iavf driver parameters");
210 * Different method for processing TX descriptor
/* Default 0: use descriptor write-back rather than head write-back */
213 static int iavf_enable_head_writeback = 0;
214 TUNABLE_INT("hw.iavf.enable_head_writeback",
215 &iavf_enable_head_writeback);
216 SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
217 &iavf_enable_head_writeback, 0,
218 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
/* Debug print mask for FreeBSD-specific (non-shared) driver code */
220 static int iavf_core_debug_mask = 0;
221 TUNABLE_INT("hw.iavf.core_debug_mask",
222 &iavf_core_debug_mask);
223 SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
224 &iavf_core_debug_mask, 0,
225 "Display debug statements that are printed in non-shared code");
/* Debug print mask for code shared with the common i40e/ixl layers */
227 static int iavf_shared_debug_mask = 0;
228 TUNABLE_INT("hw.iavf.shared_debug_mask",
229 &iavf_shared_debug_mask);
230 SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
231 &iavf_shared_debug_mask, 0,
232 "Display debug statements that are printed in shared code");
/* Default interrupt throttling rates; applied by iavf_configure_itr() */
234 int iavf_rx_itr = IXL_ITR_8K;
235 TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr);
236 SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
237 &iavf_rx_itr, 0, "RX Interrupt Rate");
239 int iavf_tx_itr = IXL_ITR_4K;
240 TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr);
241 SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
242 &iavf_tx_itr, 0, "TX Interrupt Rate");
/* TX/RX descriptor paths shared with the ixl PF driver: "hwb" is the
 * head-write-back completion scheme, "dwb" descriptor write-back
 * (selected in iavf_if_attach_pre based on enable_head_writeback). */
244 extern struct if_txrx ixl_txrx_hwb;
245 extern struct if_txrx ixl_txrx_dwb;
/*
 * Template iflib shared context; handed to iflib via iavf_register()
 * and copied per device instance.
 */
247 static struct if_shared_ctx iavf_sctx_init = {
248 .isc_magic = IFLIB_MAGIC,
249 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
250 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
251 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
252 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
253 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
254 .isc_rx_maxsize = 16384,
255 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
256 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
261 .isc_admin_intrcnt = 1,
262 .isc_vendor_info = iavf_vendor_info_array,
263 .isc_driver_version = IAVF_DRIVER_VERSION_STRING,
264 .isc_driver = &iavf_if_driver,
265 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,
267 .isc_nrxd_min = {IXL_MIN_RING},
268 .isc_ntxd_min = {IXL_MIN_RING},
269 .isc_nrxd_max = {IXL_MAX_RING},
270 .isc_ntxd_max = {IXL_MAX_RING},
271 .isc_nrxd_default = {IXL_DEFAULT_RING},
272 .isc_ntxd_default = {IXL_DEFAULT_RING},
275 if_shared_ctx_t iavf_sctx = &iavf_sctx_init;
/*
 * iavf_register - iflib registration hook; presumably returns iavf_sctx
 * so iflib can clone the template context (body not visible here).
 */
279 iavf_register(device_t dev)
/*
 * iavf_allocate_pci_resources - map BAR0 and record the device's PCI
 * identity and register-access handles into sc->hw / sc->osdep.
 * Returns non-zero if the memory BAR cannot be allocated.
 */
285 iavf_allocate_pci_resources(struct iavf_sc *sc)
287 struct i40e_hw *hw = &sc->hw;
288 device_t dev = iflib_get_dev(sc->vsi.ctx);
293 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
296 if (!(sc->pci_mem)) {
297 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
301 /* Save off the PCI information */
302 hw->vendor_id = pci_get_vendor(dev);
303 hw->device_id = pci_get_device(dev);
304 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
305 hw->subsystem_vendor_id =
306 pci_read_config(dev, PCIR_SUBVEND_0, 2);
307 hw->subsystem_device_id =
308 pci_read_config(dev, PCIR_SUBDEV_0, 2);
310 hw->bus.device = pci_get_slot(dev);
311 hw->bus.func = pci_get_function(dev);
313 /* Save off register access information */
314 sc->osdep.mem_bus_space_tag =
315 rman_get_bustag(sc->pci_mem);
316 sc->osdep.mem_bus_space_handle =
317 rman_get_bushandle(sc->pci_mem);
318 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
/* NOTE(review): VFGEN_RSTAT appears to serve as a harmless register to
 * read when flushing posted writes — confirm against osdep usage. */
319 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
322 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
323 sc->hw.back = &sc->osdep;
/*
 * iavf_if_attach_pre - first stage of device attach.
 *
 * Maps PCI resources, waits for any in-progress VF reset to finish,
 * brings up the admin queue and negotiates the virtchnl API version
 * with the PF, requests the VF configuration, selects the first SRIOV
 * VSI, establishes a MAC address, and fills in the iflib softc context
 * (queue sizes, TSO limits, capabilities, txrx ops). Error paths at
 * the bottom unwind whatever was set up before the failure.
 */
329 iavf_if_attach_pre(if_ctx_t ctx)
335 if_softc_ctx_t scctx;
338 dev = iflib_get_dev(ctx);
339 sc = iflib_get_softc(ctx);
350 vsi->media = iflib_get_media(ctx);
351 vsi->shared = scctx = iflib_get_softc_ctx(ctx);
353 iavf_save_tunables(sc);
355 /* Do PCI setup - map BAR0, etc */
356 if (iavf_allocate_pci_resources(sc)) {
357 device_printf(dev, "%s: Allocation of PCI resources failed\n",
363 iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");
366 * XXX: This is called by init_shared_code in the PF driver,
367 * but the rest of that function does not support VFs.
369 error = i40e_set_mac_type(hw);
371 device_printf(dev, "%s: set_mac_type failed: %d\n",
376 error = iavf_reset_complete(hw);
378 device_printf(dev, "%s: Device is still being reset\n",
383 iavf_dbg_init(sc, "VF Device is ready for configuration\n");
385 /* Sets up Admin Queue */
386 error = iavf_setup_vc(sc);
388 device_printf(dev, "%s: Error setting up PF comms, %d\n",
393 iavf_dbg_init(sc, "PF API version verified\n");
395 /* Need API version before sending reset message */
396 error = iavf_reset(sc);
398 device_printf(dev, "VF reset failed; reload the driver\n");
402 iavf_dbg_init(sc, "VF reset complete\n");
404 /* Ask for VF config from PF */
405 error = iavf_vf_config(sc);
407 device_printf(dev, "Error getting configuration from PF: %d\n",
413 "VSIs %d, QPs %d, MSI-X %d, RSS sizes: key %d lut %d\n",
414 sc->vf_res->num_vsis,
415 sc->vf_res->num_queue_pairs,
416 sc->vf_res->max_vectors,
417 sc->vf_res->rss_key_size,
418 sc->vf_res->rss_lut_size);
419 iavf_dbg_info(sc, "Capabilities=%b\n",
420 sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
422 /* got VF config message back from PF, now we can parse it */
423 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
424 /* XXX: We only use the first VSI we find */
425 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
426 sc->vsi_res = &sc->vf_res->vsi_res[i];
429 device_printf(dev, "%s: no LAN VSI found\n", __func__);
433 vsi->id = sc->vsi_res->vsi_id;
435 iavf_dbg_init(sc, "Resource Acquisition complete\n");
437 /* If no mac address was assigned just make a random one */
438 if (!iavf_check_ether_addr(hw->mac.addr)) {
439 u8 addr[ETHER_ADDR_LEN];
440 arc4rand(&addr, sizeof(addr), 0);
443 bcopy(addr, hw->mac.addr, sizeof(addr));
445 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
446 iflib_set_mac(ctx, hw->mac.addr);
448 /* Allocate filter lists */
449 iavf_init_filters(sc);
451 /* Fill out more iflib parameters */
452 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
453 sc->vsi_res->num_queue_pairs;
/* Head write-back mode reserves an extra u32 per ring for the
 * HW-written head index */
454 if (vsi->enable_head_writeback) {
455 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
456 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
457 scctx->isc_txrx = &ixl_txrx_hwb;
459 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
460 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
461 scctx->isc_txrx = &ixl_txrx_dwb;
463 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
464 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
465 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
466 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
467 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
468 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
469 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
470 scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE;
471 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
472 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
/* Error-path unwind: release resources acquired above */
477 free(sc->vf_res, M_IAVF);
479 i40e_shutdown_adminq(hw);
481 iavf_free_pci_resources(sc);
/*
 * iavf_if_attach_post - second stage of attach, after iflib has sized
 * the queue sets: record final queue counts, set up the stack-facing
 * interface, add per-device sysctls, and enable the admin queue
 * interrupt so early virtchnl traffic can be processed.
 */
487 iavf_if_attach_post(if_ctx_t ctx)
495 INIT_DBG_DEV(dev, "begin");
497 dev = iflib_get_dev(ctx);
498 sc = iflib_get_softc(ctx);
500 vsi->ifp = iflib_get_ifp(ctx);
503 /* Save off determined number of queues for interface */
504 vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
505 vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
507 /* Setup the stack interface */
508 iavf_setup_interface(dev, sc);
510 INIT_DBG_DEV(dev, "Interface setup complete");
512 /* Initialize statistics & add sysctls */
513 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
514 iavf_add_device_sysctls(sc);
516 sc->init_state = IAVF_INIT_READY;
517 atomic_store_rel_32(&sc->queues_enabled, 0);
519 /* We want AQ enabled early for init */
520 iavf_enable_adminq_irq(hw);
522 INIT_DBG_DEV(dev, "end");
528 * XXX: iflib always ignores the return value of detach()
529 * -> This means that this isn't allowed to fail
/*
 * iavf_if_detach - tear down the device: remove media/link state,
 * quiesce and shut down the admin queue, then release the VF resource
 * buffer, PCI resources, and filter lists. A failed adminq shutdown is
 * only logged, per the XXX above.
 */
532 iavf_if_detach(if_ctx_t ctx)
534 struct iavf_sc *sc = iflib_get_softc(ctx);
535 struct ixl_vsi *vsi = &sc->vsi;
536 struct i40e_hw *hw = &sc->hw;
537 device_t dev = sc->dev;
538 enum i40e_status_code status;
540 INIT_DBG_DEV(dev, "begin");
542 /* Remove all the media and link information */
543 ifmedia_removeall(vsi->media);
545 iavf_disable_adminq_irq(hw);
546 status = i40e_shutdown_adminq(&sc->hw);
547 if (status != I40E_SUCCESS) {
549 "i40e_shutdown_adminq() failed with status %s\n",
550 i40e_stat_str(hw, status));
553 free(sc->vf_res, M_IAVF);
554 iavf_free_pci_resources(sc);
555 iavf_free_filters(sc);
557 INIT_DBG_DEV(dev, "end");
/* Trivial iflib shutdown/power-management hooks (bodies not visible
 * in this view). */
562 iavf_if_shutdown(if_ctx_t ctx)
568 iavf_if_suspend(if_ctx_t ctx)
574 iavf_if_resume(if_ctx_t ctx)
/*
 * iavf_send_vc_msg_sleep - queue a virtchnl request to the PF and,
 * unless the device is detaching, sleep on the iflib ctx lock until
 * the matching response wakes us or IAVF_AQ_TIMEOUT expires
 * (EWOULDBLOCK on timeout is logged).
 */
580 iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
583 if_ctx_t ctx = sc->vsi.ctx;
585 error = ixl_vc_send_cmd(sc, op);
587 iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
591 /* Don't wait for a response if the device is being detached. */
592 if (!iflib_in_detach(ctx)) {
593 iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
594 error = sx_sleep(ixl_vc_get_op_chan(sc, op),
595 iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);
597 if (error == EWOULDBLOCK)
598 device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
/*
 * iavf_send_vc_msg - fire-and-forget variant: queue the virtchnl
 * request without waiting for the PF's response.
 */
605 iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
609 error = ixl_vc_send_cmd(sc, op);
611 iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
/*
 * iavf_init_queues - software-initialize every TX ring and, for each
 * RX ring, record the current mbuf size and zero the hardware tail
 * pointer before (re)starting the interface.
 */
617 iavf_init_queues(struct ixl_vsi *vsi)
619 struct ixl_tx_queue *tx_que = vsi->tx_queues;
620 struct ixl_rx_queue *rx_que = vsi->rx_queues;
623 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
624 ixl_init_tx_ring(vsi, tx_que);
626 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
629 rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
631 wr32(vsi->hw, rxr->tail, 0);
/*
 * iavf_if_init - bring the interface up. Called with the iflib ctx
 * lock held (asserted below): verify the VF reset completed and the
 * admin send queue is alive (re-initializing it if not), disable the
 * queues, reconcile the MAC filter with the ifnet lladdr, program
 * rings/ITR/vector mappings via virtchnl, then enable the queues and
 * mark the driver running.
 */
636 iavf_if_init(if_ctx_t ctx)
638 struct iavf_sc *sc = iflib_get_softc(ctx);
639 struct ixl_vsi *vsi = &sc->vsi;
640 struct i40e_hw *hw = &sc->hw;
641 struct ifnet *ifp = iflib_get_ifp(ctx);
642 u8 tmpaddr[ETHER_ADDR_LEN];
645 INIT_DBG_IF(ifp, "begin");
647 MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));
649 error = iavf_reset_complete(hw);
651 device_printf(sc->dev, "%s: VF reset failed\n",
655 if (!i40e_check_asq_alive(hw)) {
656 iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
657 pci_enable_busmaster(sc->dev);
658 i40e_shutdown_adminq(hw);
659 i40e_init_adminq(hw);
662 /* Make sure queues are disabled */
663 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
/* If the ifnet lladdr differs from the programmed MAC, swap filters */
665 bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
666 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
667 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
668 error = iavf_del_mac_filter(sc, hw->mac.addr);
670 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
672 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
675 error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
676 if (!error || error == EEXIST)
677 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
678 iflib_set_mac(ctx, hw->mac.addr);
680 /* Prepare the queues for operation */
681 iavf_init_queues(vsi);
683 /* Set initial ITR values */
684 iavf_configure_itr(sc);
686 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);
692 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);
694 /* Init SW TX ring indices */
695 if (vsi->enable_head_writeback)
696 ixl_init_tx_cidx(vsi);
698 ixl_init_tx_rsqs(vsi);
700 /* Configure promiscuous mode */
701 iavf_if_promisc_set(ctx, if_getflags(ifp));
/* Sleep until the PF acknowledges the queue-enable request */
704 iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);
706 sc->init_state = IAVF_RUNNING;
710 * iavf_attach() helper function; initializes the admin queue
711 * and attempts to establish contact with the PF by
712 * retrying the initial "API version" message several times
713 * or until the PF responds.
716 iavf_setup_vc(struct iavf_sc *sc)
718 struct i40e_hw *hw = &sc->hw;
719 device_t dev = sc->dev;
720 int error = 0, ret_error = 0, asq_retries = 0;
721 bool send_api_ver_retried = 0;
/* Need to set these AQ parameters before initializing AQ */
724 hw->aq.num_arq_entries = IXL_AQ_LEN;
725 hw->aq.num_asq_entries = IXL_AQ_LEN;
726 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
727 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
/* Retry the init/send/verify sequence up to IAVF_AQ_MAX_ERR times */
729 for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
730 /* Initialize admin queue */
731 error = i40e_init_adminq(hw);
733 device_printf(dev, "%s: init_adminq failed: %d\n",
739 iavf_dbg_init(sc, "Initialized Admin Queue; starting"
740 " send_api_ver attempt %d", i+1);
743 /* Send VF's API version */
744 error = iavf_send_api_ver(sc);
746 i40e_shutdown_adminq(hw);
748 device_printf(dev, "%s: unable to send api"
749 " version to PF on attempt %d, error %d\n",
750 __func__, i+1, error);
/* Poll until the AQ send completes, bounded by IAVF_AQ_MAX_ERR */
754 while (!i40e_asq_done(hw)) {
755 if (++asq_retries > IAVF_AQ_MAX_ERR) {
756 i40e_shutdown_adminq(hw);
757 device_printf(dev, "Admin Queue timeout "
758 "(waiting for send_api_ver), %d more tries...\n",
759 IAVF_AQ_MAX_ERR - (i + 1));
765 if (asq_retries > IAVF_AQ_MAX_ERR)
768 iavf_dbg_init(sc, "Sent API version message to PF");
770 /* Verify that the VF accepts the PF's API version */
771 error = iavf_verify_api_ver(sc);
772 if (error == ETIMEDOUT) {
773 if (!send_api_ver_retried) {
774 /* Resend message, one more time */
775 send_api_ver_retried = true;
777 "%s: Timeout while verifying API version on first"
778 " try!\n", __func__);
782 "%s: Timeout while verifying API version on second"
783 " try!\n", __func__);
790 "%s: Unable to verify API version,"
791 " error %s\n", __func__, i40e_stat_str(hw, error));
798 i40e_shutdown_adminq(hw);
803 * iavf_attach() helper function; asks the PF for this VF's
804 * configuration, and saves the information if it receives it.
807 iavf_vf_config(struct iavf_sc *sc)
809 struct i40e_hw *hw = &sc->hw;
810 device_t dev = sc->dev;
811 int bufsz, error = 0, ret_error = 0;
812 int asq_retries, retried = 0;
815 error = iavf_send_vf_config_msg(sc);
818 "%s: Unable to send VF config request, attempt %d,"
819 " error %d\n", __func__, retried + 1, error);
/* Poll until the AQ send completes, bounded by IAVF_AQ_MAX_ERR */
824 while (!i40e_asq_done(hw)) {
825 if (++asq_retries > IAVF_AQ_MAX_ERR) {
826 device_printf(dev, "%s: Admin Queue timeout "
827 "(waiting for send_vf_config_msg), attempt %d\n",
828 __func__, retried + 1);
835 iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
/* Response buffer sized for the maximum VSIs a VF can be granted */
839 bufsz = sizeof(struct virtchnl_vf_resource) +
840 (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
841 sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT);
844 "%s: Unable to allocate memory for VF configuration"
845 " message from PF on attempt %d\n", __func__, retried + 1);
851 /* Check for VF config response */
852 error = iavf_get_vf_config(sc);
853 if (error == ETIMEDOUT) {
854 /* The 1st time we timeout, send the configuration message again */
860 "%s: iavf_get_vf_config() timed out waiting for a response\n",
865 "%s: Unable to get VF configuration from PF after %d tries!\n",
866 __func__, retried + 1);
/* Error path: release the response buffer */
872 free(sc->vf_res, M_IAVF);
/*
 * iavf_if_msix_intr_assign - register the admin queue handler on MSI-X
 * vector 0 and one RX interrupt per RX queue on vectors 1..n. TX
 * queues are serviced as softirqs sharing the RX vectors, round-robin
 * when there are more TX than RX queues.
 */
878 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix)
880 struct iavf_sc *sc = iflib_get_softc(ctx);
881 struct ixl_vsi *vsi = &sc->vsi;
882 struct ixl_rx_queue *rx_que = vsi->rx_queues;
883 struct ixl_tx_queue *tx_que = vsi->tx_queues;
884 int err, i, rid, vector = 0;
887 MPASS(vsi->shared->isc_nrxqsets > 0);
888 MPASS(vsi->shared->isc_ntxqsets > 0);
890 /* Admin Que is vector 0*/
892 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
893 iavf_msix_adminq, sc, 0, "aq");
895 iflib_irq_free(ctx, &vsi->irq);
896 device_printf(iflib_get_dev(ctx),
897 "Failed to register Admin Que handler");
901 /* Now set up the stations */
902 for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
905 snprintf(buf, sizeof(buf), "rxq%d", i);
906 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
907 IFLIB_INTR_RX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
908 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than
909 * what's expected in the iflib context? */
911 device_printf(iflib_get_dev(ctx),
912 "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
913 vsi->num_rx_queues = i + 1;
916 rx_que->msix = vector;
919 bzero(buf, sizeof(buf));
921 for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
922 snprintf(buf, sizeof(buf), "txq%d", i);
923 iflib_softirq_alloc_generic(ctx,
924 &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
925 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
927 /* TODO: Maybe call a strategy function for this to figure out which
928 * interrupts to map Tx queues to. I don't know if there's an immediately
929 * better way than this other than a user-supplied map, though. */
930 tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
/* Failure: release the admin vector and any RX vectors allocated so far */
935 iflib_irq_free(ctx, &vsi->irq);
936 rx_que = vsi->rx_queues;
937 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
938 iflib_irq_free(ctx, &rx_que->que_irq);
942 /* Enable all interrupts */
/* Thin iflib wrapper around the shared VSI interrupt-enable helper. */
944 iavf_if_enable_intr(if_ctx_t ctx)
946 struct iavf_sc *sc = iflib_get_softc(ctx);
947 struct ixl_vsi *vsi = &sc->vsi;
949 iavf_enable_intr(vsi);
952 /* Disable all interrupts */
/* Thin iflib wrapper around the shared VSI interrupt-disable helper. */
954 iavf_if_disable_intr(if_ctx_t ctx)
956 struct iavf_sc *sc = iflib_get_softc(ctx);
957 struct ixl_vsi *vsi = &sc->vsi;
959 iavf_disable_intr(vsi);
/* Re-arm the interrupt for a single RX queue. The "- 1" converts the
 * MSI-X vector number to the hardware queue-irq index, since vector 0
 * is reserved for the admin queue (see iavf_if_msix_intr_assign). */
963 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
965 struct iavf_sc *sc = iflib_get_softc(ctx);
966 struct ixl_vsi *vsi = &sc->vsi;
967 struct i40e_hw *hw = vsi->hw;
968 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
970 iavf_enable_queue_irq(hw, rx_que->msix - 1);
/* Re-arm the interrupt associated with a single TX queue (shares the
 * RX queue's vector; same "- 1" adjustment as above). */
975 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
977 struct iavf_sc *sc = iflib_get_softc(ctx);
978 struct ixl_vsi *vsi = &sc->vsi;
979 struct i40e_hw *hw = vsi->hw;
980 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
982 iavf_enable_queue_irq(hw, tx_que->msix - 1);
/*
 * iavf_if_tx_queues_alloc - allocate software TX queue state and bind
 * each ring to the DMA memory iflib allocated (vaddrs/paddrs). In
 * descriptor write-back mode, also allocate and initialize the
 * report-status (tx_rsq) array used to track completed descriptors.
 */
987 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
989 struct iavf_sc *sc = iflib_get_softc(ctx);
990 struct ixl_vsi *vsi = &sc->vsi;
991 if_softc_ctx_t scctx = vsi->shared;
992 struct ixl_tx_queue *que;
995 MPASS(scctx->isc_ntxqsets > 0);
997 MPASS(scctx->isc_ntxqsets == ntxqsets);
999 /* Allocate queue structure memory */
1000 if (!(vsi->tx_queues =
1001 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1002 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1006 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1007 struct tx_ring *txr = &que->txr;
1012 if (!vsi->enable_head_writeback) {
1013 /* Allocate report status array */
1014 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
1015 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1019 /* Init report status array */
1020 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1021 txr->tx_rsq[j] = QIDX_INVALID;
1023 /* get the virtual and physical address of the hardware queues */
1024 txr->tail = I40E_QTX_TAIL1(txr->me);
1025 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1026 txr->tx_paddr = paddrs[i * ntxqs];
/* Failure: release any partially-allocated queue state */
1032 iavf_if_queues_free(ctx);
/*
 * iavf_if_rx_queues_alloc - allocate software RX queue state and bind
 * each ring to the DMA memory iflib allocated (vaddrs/paddrs).
 */
1037 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1039 struct iavf_sc *sc = iflib_get_softc(ctx);
1040 struct ixl_vsi *vsi = &sc->vsi;
1041 struct ixl_rx_queue *que;
1045 if_softc_ctx_t scctx = vsi->shared;
1046 MPASS(scctx->isc_nrxqsets > 0);
1048 MPASS(scctx->isc_nrxqsets == nrxqsets);
1051 /* Allocate queue structure memory */
1052 if (!(vsi->rx_queues =
1053 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1054 nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1055 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1060 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1061 struct rx_ring *rxr = &que->rxr;
1066 /* get the virtual and physical address of the hardware queues */
1067 rxr->tail = I40E_QRX_TAIL1(rxr->me);
1068 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1069 rxr->rx_paddr = paddrs[i * nrxqs];
/* Failure: release any partially-allocated queue state */
1075 iavf_if_queues_free(ctx);
/*
 * iavf_if_queues_free - release the software queue state allocated by
 * the *_queues_alloc callbacks, including per-ring tx_rsq arrays when
 * descriptor write-back was in use. NULL checks make this safe to call
 * on a partially-completed allocation (it is the alloc error path).
 */
1080 iavf_if_queues_free(if_ctx_t ctx)
1082 struct iavf_sc *sc = iflib_get_softc(ctx);
1083 struct ixl_vsi *vsi = &sc->vsi;
1085 if (!vsi->enable_head_writeback) {
1086 struct ixl_tx_queue *que;
1089 for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
1090 struct tx_ring *txr = &que->txr;
1091 if (txr->tx_rsq != NULL) {
1092 free(txr->tx_rsq, M_IAVF);
1098 if (vsi->tx_queues != NULL) {
1099 free(vsi->tx_queues, M_IAVF);
1100 vsi->tx_queues = NULL;
1102 if (vsi->rx_queues != NULL) {
1103 free(vsi->rx_queues, M_IAVF);
1104 vsi->rx_queues = NULL;
/*
 * iavf_check_aq_errors - read the ARQ/ASQ length registers, log and
 * clear any latched VF/overflow/critical error bits, and if an error
 * was seen, mark the VF as needing reset, request one from the PF, and
 * return EIO; returns 0 when no errors were latched.
 */
1109 iavf_check_aq_errors(struct iavf_sc *sc)
1111 struct i40e_hw *hw = &sc->hw;
1112 device_t dev = sc->dev;
1114 u8 aq_error = false;
1116 /* check for Admin queue errors */
1117 oldreg = reg = rd32(hw, hw->aq.arq.len);
1118 if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
1119 device_printf(dev, "ARQ VF Error detected\n");
1120 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
1123 if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
1124 device_printf(dev, "ARQ Overflow Error detected\n");
1125 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
1128 if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
1129 device_printf(dev, "ARQ Critical Error detected\n");
1130 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
/* NOTE(review): oldreg suggests the write-back is guarded on a value
 * change — the guard lines are not visible in this view; confirm. */
1134 wr32(hw, hw->aq.arq.len, reg);
1136 oldreg = reg = rd32(hw, hw->aq.asq.len);
1137 if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
1138 device_printf(dev, "ASQ VF Error detected\n");
1139 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
1142 if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
1143 device_printf(dev, "ASQ Overflow Error detected\n");
1144 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
1147 if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
1148 device_printf(dev, "ASQ Critical Error detected\n");
1149 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
1153 wr32(hw, hw->aq.asq.len, reg);
1156 device_printf(dev, "WARNING: Stopping VF!\n");
1158 * A VF reset might not be enough to fix a problem here;
1159 * a PF reset could be required.
1161 sc->init_state = IAVF_RESET_REQUIRED;
1163 iavf_request_reset(sc);
1166 return (aq_error ? EIO : 0);
/*
 * iavf_process_adminq - drain pending admin queue events, dispatching
 * each virtchnl message to iavf_vc_completion(), then re-enable the
 * adminq interrupt cause. *pending reports events still outstanding
 * when the per-call work limit (IXL_ADM_LIMIT) is reached.
 */
1169 static enum i40e_status_code
1170 iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
1172 enum i40e_status_code status = I40E_SUCCESS;
1173 struct i40e_arq_event_info event;
1174 struct i40e_hw *hw = &sc->hw;
1175 struct virtchnl_msg *v_msg;
1176 int error = 0, loop = 0;
/* Bail out (reset already requested) on latched AQ hardware errors */
1179 error = iavf_check_aq_errors(sc);
1181 return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR);
1183 event.buf_len = IXL_AQ_BUF_SZ;
1184 event.msg_buf = sc->aq_buffer;
1185 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1186 v_msg = (struct virtchnl_msg *)&event.desc;
1188 /* clean and process any events */
1190 status = i40e_clean_arq_element(hw, &event, pending);
1192 * Also covers normal case when i40e_clean_arq_element()
1193 * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK"
1197 iavf_vc_completion(sc, v_msg->v_opcode,
1198 v_msg->v_retval, event.msg_buf, event.msg_len);
1199 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1200 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1202 /* Re-enable admin queue interrupt cause */
1203 reg = rd32(hw, I40E_VFINT_ICR0_ENA1);
1204 reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK;
1205 wr32(hw, I40E_VFINT_ICR0_ENA1, reg);
1211 iavf_if_update_admin_status(if_ctx_t ctx)
1213 struct iavf_sc *sc = iflib_get_softc(ctx);
1214 struct i40e_hw *hw = &sc->hw;
1217 iavf_process_adminq(sc, &pending);
1218 iavf_update_link_status(sc);
1221 * If there are still messages to process, reschedule.
1222 * Otherwise, re-enable the Admin Queue interrupt.
1225 iflib_admin_intr_deferred(ctx);
1227 iavf_enable_adminq_irq(hw);
1231 iavf_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
1233 struct iavf_sc *sc = arg;
1236 error = iavf_add_mac_filter(sc, (u8*)LLADDR(sdl), IXL_FILTER_MC);
1241 iavf_if_multi_set(if_ctx_t ctx)
1243 struct iavf_sc *sc = iflib_get_softc(ctx);
1245 IOCTL_DEBUGOUT("iavf_if_multi_set: begin");
1247 if (__predict_false(if_llmaddr_count(iflib_get_ifp(ctx)) >=
1248 MAX_MULTICAST_ADDR)) {
1249 /* Delete MC filters and enable mulitcast promisc instead */
1250 iavf_init_multi(sc);
1251 sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
1252 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
1256 /* If there aren't too many filters, delete existing MC filters */
1257 iavf_init_multi(sc);
1259 /* And (re-)install filters for all mcast addresses */
1260 if (if_foreach_llmaddr(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc) >
1262 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
1266 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1268 struct iavf_sc *sc = iflib_get_softc(ctx);
1269 struct ixl_vsi *vsi = &sc->vsi;
1271 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1272 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1273 ETHER_VLAN_ENCAP_LEN)
1276 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1277 ETHER_VLAN_ENCAP_LEN;
1283 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1286 struct ifnet *ifp = iflib_get_ifp(ctx);
1288 struct iavf_sc *sc = iflib_get_softc(ctx);
1290 INIT_DBG_IF(ifp, "begin");
1292 iavf_update_link_status(sc);
1294 ifmr->ifm_status = IFM_AVALID;
1295 ifmr->ifm_active = IFM_ETHER;
1300 ifmr->ifm_status |= IFM_ACTIVE;
1301 /* Hardware is always full-duplex */
1302 ifmr->ifm_active |= IFM_FDX;
1304 /* Based on the link speed reported by the PF over the AdminQ, choose a
1305 * PHY type to report. This isn't 100% correct since we don't really
1306 * know the underlying PHY type of the PF, but at least we can report
1307 * a valid link speed...
1309 switch (sc->link_speed) {
1310 case VIRTCHNL_LINK_SPEED_100MB:
1311 ifmr->ifm_active |= IFM_100_TX;
1313 case VIRTCHNL_LINK_SPEED_1GB:
1314 ifmr->ifm_active |= IFM_1000_T;
1316 case VIRTCHNL_LINK_SPEED_10GB:
1317 ifmr->ifm_active |= IFM_10G_SR;
1319 case VIRTCHNL_LINK_SPEED_20GB:
1320 case VIRTCHNL_LINK_SPEED_25GB:
1321 ifmr->ifm_active |= IFM_25G_SR;
1323 case VIRTCHNL_LINK_SPEED_40GB:
1324 ifmr->ifm_active |= IFM_40G_SR4;
1327 ifmr->ifm_active |= IFM_UNKNOWN;
1331 INIT_DBG_IF(ifp, "end");
1335 iavf_if_media_change(if_ctx_t ctx)
1337 struct ifmedia *ifm = iflib_get_media(ctx);
1339 INIT_DEBUGOUT("ixl_media_change: begin");
1341 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1344 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1349 iavf_if_promisc_set(if_ctx_t ctx, int flags)
1351 struct iavf_sc *sc = iflib_get_softc(ctx);
1352 struct ifnet *ifp = iflib_get_ifp(ctx);
1354 sc->promisc_flags = 0;
1356 if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1358 sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
1359 if (flags & IFF_PROMISC)
1360 sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;
1362 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
1368 iavf_if_timer(if_ctx_t ctx, uint16_t qid)
1370 struct iavf_sc *sc = iflib_get_softc(ctx);
1371 struct i40e_hw *hw = &sc->hw;
1377 /* Check for when PF triggers a VF reset */
1378 val = rd32(hw, I40E_VFGEN_RSTAT) &
1379 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1380 if (val != VIRTCHNL_VFR_VFACTIVE
1381 && val != VIRTCHNL_VFR_COMPLETED) {
1382 iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
1386 /* Fire off the adminq task */
1387 iflib_admin_intr_deferred(ctx);
1390 iavf_request_stats(sc);
1394 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
1396 struct iavf_sc *sc = iflib_get_softc(ctx);
1397 struct ixl_vsi *vsi = &sc->vsi;
1398 struct iavf_vlan_filter *v;
1400 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1404 v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO);
1405 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1407 v->flags = IXL_FILTER_ADD;
1409 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
1413 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1415 struct iavf_sc *sc = iflib_get_softc(ctx);
1416 struct ixl_vsi *vsi = &sc->vsi;
1417 struct iavf_vlan_filter *v;
1420 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1423 SLIST_FOREACH(v, sc->vlan_filters, next) {
1424 if (v->vlan == vtag) {
1425 v->flags = IXL_FILTER_DEL;
1431 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
1435 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1437 struct iavf_sc *sc = iflib_get_softc(ctx);
1438 struct ixl_vsi *vsi = &sc->vsi;
1439 if_t ifp = iflib_get_ifp(ctx);
1442 case IFCOUNTER_IPACKETS:
1443 return (vsi->ipackets);
1444 case IFCOUNTER_IERRORS:
1445 return (vsi->ierrors);
1446 case IFCOUNTER_OPACKETS:
1447 return (vsi->opackets);
1448 case IFCOUNTER_OERRORS:
1449 return (vsi->oerrors);
1450 case IFCOUNTER_COLLISIONS:
1451 /* Collisions are by standard impossible in 40G/10G Ethernet */
1453 case IFCOUNTER_IBYTES:
1454 return (vsi->ibytes);
1455 case IFCOUNTER_OBYTES:
1456 return (vsi->obytes);
1457 case IFCOUNTER_IMCASTS:
1458 return (vsi->imcasts);
1459 case IFCOUNTER_OMCASTS:
1460 return (vsi->omcasts);
1461 case IFCOUNTER_IQDROPS:
1462 return (vsi->iqdrops);
1463 case IFCOUNTER_OQDROPS:
1464 return (vsi->oqdrops);
1465 case IFCOUNTER_NOPROTO:
1466 return (vsi->noproto);
1468 return (if_get_counter_default(ifp, cnt));
1472 /* iavf_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1473 * @ctx: iflib context
1474 * @event: event code to check
1476 * Defaults to returning true for every event.
1478 * @returns true if iflib needs to reinit the interface
1481 iavf_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1484 case IFLIB_RESTART_VLAN_CONFIG:
1485 /* This case must return true if VLAN anti-spoof checks are
1486 * enabled by the PF driver for the VF.
1494 iavf_free_pci_resources(struct iavf_sc *sc)
1496 struct ixl_vsi *vsi = &sc->vsi;
1497 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1498 device_t dev = sc->dev;
1500 /* We may get here before stations are set up */
1504 /* Release all interrupts */
1505 iflib_irq_free(vsi->ctx, &vsi->irq);
1507 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1508 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
1511 if (sc->pci_mem != NULL)
1512 bus_release_resource(dev, SYS_RES_MEMORY,
1513 rman_get_rid(sc->pci_mem), sc->pci_mem);
1518 ** Requests a VF reset from the PF.
1520 ** Requires the VF's Admin Queue to be initialized.
1523 iavf_reset(struct iavf_sc *sc)
1525 struct i40e_hw *hw = &sc->hw;
1526 device_t dev = sc->dev;
1529 /* Ask the PF to reset us if we are initiating */
1530 if (sc->init_state != IAVF_RESET_PENDING)
1531 iavf_request_reset(sc);
1533 i40e_msec_pause(100);
1534 error = iavf_reset_complete(hw);
1536 device_printf(dev, "%s: VF reset failed\n",
1540 pci_enable_busmaster(dev);
1542 error = i40e_shutdown_adminq(hw);
1544 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1549 error = i40e_init_adminq(hw);
1551 device_printf(dev, "%s: init_adminq failed: %d\n",
1556 iavf_enable_adminq_irq(hw);
1561 iavf_reset_complete(struct i40e_hw *hw)
1565 /* Wait up to ~10 seconds */
1566 for (int i = 0; i < 100; i++) {
1567 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1568 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1570 if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
1571 (reg == VIRTCHNL_VFR_COMPLETED))
1573 i40e_msec_pause(100);
1580 iavf_setup_interface(device_t dev, struct iavf_sc *sc)
1582 struct ixl_vsi *vsi = &sc->vsi;
1583 if_ctx_t ctx = vsi->ctx;
1584 struct ifnet *ifp = iflib_get_ifp(ctx);
1586 INIT_DBG_DEV(dev, "begin");
1588 vsi->shared->isc_max_frame_size =
1589 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1590 + ETHER_VLAN_ENCAP_LEN;
1591 #if __FreeBSD_version >= 1100000
1592 if_setbaudrate(ifp, IF_Gbps(40));
1594 if_initbaudrate(ifp, IF_Gbps(40));
1597 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1598 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1602 ** Get a new filter and add it to the mac filter list.
1604 static struct iavf_mac_filter *
1605 iavf_get_mac_filter(struct iavf_sc *sc)
1607 struct iavf_mac_filter *f;
1609 f = malloc(sizeof(struct iavf_mac_filter),
1610 M_IAVF, M_NOWAIT | M_ZERO);
1612 SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1618 ** Find the filter with matching MAC address
1620 static struct iavf_mac_filter *
1621 iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
1623 struct iavf_mac_filter *f;
1626 SLIST_FOREACH(f, sc->mac_filters, next) {
1627 if (cmp_etheraddr(f->macaddr, macaddr)) {
1639 ** Admin Queue interrupt handler
1642 iavf_msix_adminq(void *arg)
1644 struct iavf_sc *sc = arg;
1645 struct i40e_hw *hw = &sc->hw;
1647 bool do_task = FALSE;
1651 reg = rd32(hw, I40E_VFINT_ICR01);
1653 * For masking off interrupt causes that need to be handled before
1654 * they can be re-enabled
1656 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1658 /* Check on the cause */
1659 if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) {
1660 mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK;
1664 wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1665 iavf_enable_adminq_irq(hw);
1668 return (FILTER_SCHEDULE_THREAD);
1670 return (FILTER_HANDLED);
1674 iavf_enable_intr(struct ixl_vsi *vsi)
1676 struct i40e_hw *hw = vsi->hw;
1677 struct ixl_rx_queue *que = vsi->rx_queues;
1679 iavf_enable_adminq_irq(hw);
1680 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1681 iavf_enable_queue_irq(hw, que->rxr.me);
1685 iavf_disable_intr(struct ixl_vsi *vsi)
1687 struct i40e_hw *hw = vsi->hw;
1688 struct ixl_rx_queue *que = vsi->rx_queues;
1690 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1691 iavf_disable_queue_irq(hw, que->rxr.me);
1695 iavf_disable_adminq_irq(struct i40e_hw *hw)
1697 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1698 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1700 rd32(hw, I40E_VFGEN_RSTAT);
1704 iavf_enable_adminq_irq(struct i40e_hw *hw)
1706 wr32(hw, I40E_VFINT_DYN_CTL01,
1707 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1708 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1709 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
1711 rd32(hw, I40E_VFGEN_RSTAT);
1715 iavf_enable_queue_irq(struct i40e_hw *hw, int id)
1719 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1720 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1721 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1722 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1726 iavf_disable_queue_irq(struct i40e_hw *hw, int id)
1728 wr32(hw, I40E_VFINT_DYN_CTLN1(id),
1729 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1730 rd32(hw, I40E_VFGEN_RSTAT);
1734 iavf_configure_tx_itr(struct iavf_sc *sc)
1736 struct i40e_hw *hw = &sc->hw;
1737 struct ixl_vsi *vsi = &sc->vsi;
1738 struct ixl_tx_queue *que = vsi->tx_queues;
1740 vsi->tx_itr_setting = sc->tx_itr;
1742 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
1743 struct tx_ring *txr = &que->txr;
1745 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
1746 vsi->tx_itr_setting);
1747 txr->itr = vsi->tx_itr_setting;
1748 txr->latency = IXL_AVE_LATENCY;
1753 iavf_configure_rx_itr(struct iavf_sc *sc)
1755 struct i40e_hw *hw = &sc->hw;
1756 struct ixl_vsi *vsi = &sc->vsi;
1757 struct ixl_rx_queue *que = vsi->rx_queues;
1759 vsi->rx_itr_setting = sc->rx_itr;
1761 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
1762 struct rx_ring *rxr = &que->rxr;
1764 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
1765 vsi->rx_itr_setting);
1766 rxr->itr = vsi->rx_itr_setting;
1767 rxr->latency = IXL_AVE_LATENCY;
/*
 * Get initial ITR values from tunable values.
 */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
	iavf_configure_tx_itr(sc);
	iavf_configure_rx_itr(sc);
}
1782 ** Provide a update to the queue RX
1783 ** interrupt moderation value.
1786 iavf_set_queue_rx_itr(struct ixl_rx_queue *que)
1788 struct ixl_vsi *vsi = que->vsi;
1789 struct i40e_hw *hw = vsi->hw;
1790 struct rx_ring *rxr = &que->rxr;
1792 /* Idle, do nothing */
1793 if (rxr->bytes == 0)
1796 /* Update the hardware if needed */
1797 if (rxr->itr != vsi->rx_itr_setting) {
1798 rxr->itr = vsi->rx_itr_setting;
1799 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1800 que->rxr.me), rxr->itr);
1805 iavf_msix_que(void *arg)
1807 struct ixl_rx_queue *rx_que = arg;
1811 iavf_set_queue_rx_itr(rx_que);
1812 // iavf_set_queue_tx_itr(que);
1814 return (FILTER_SCHEDULE_THREAD);
1817 /*********************************************************************
1818 * Multicast Initialization
1820 * This routine is called by init to reset a fresh state.
1822 **********************************************************************/
1824 iavf_init_multi(struct iavf_sc *sc)
1826 struct iavf_mac_filter *f;
1829 /* First clear any multicast filters */
1830 SLIST_FOREACH(f, sc->mac_filters, next) {
1831 if ((f->flags & IXL_FILTER_USED)
1832 && (f->flags & IXL_FILTER_MC)) {
1833 f->flags |= IXL_FILTER_DEL;
1838 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
1842 ** Note: this routine updates the OS on the link state
1843 ** the real check of the hardware only happens with
1844 ** a link interrupt.
1847 iavf_update_link_status(struct iavf_sc *sc)
1849 struct ixl_vsi *vsi = &sc->vsi;
1853 if (vsi->link_active == FALSE) {
1854 vsi->link_active = TRUE;
1855 baudrate = ixl_max_vc_speed_to_value(sc->link_speed);
1856 iavf_dbg_info(sc, "baudrate: %lu\n", baudrate);
1857 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1859 } else { /* Link down */
1860 if (vsi->link_active == TRUE) {
1861 vsi->link_active = FALSE;
1862 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1867 /*********************************************************************
1869 * This routine disables all traffic on the adapter by issuing a
1870 * global reset on the MAC and deallocates TX/RX buffers.
1872 **********************************************************************/
1875 iavf_stop(struct iavf_sc *sc)
1881 iavf_disable_intr(&sc->vsi);
1883 if (atomic_load_acq_32(&sc->queues_enabled))
1884 iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
1888 iavf_if_stop(if_ctx_t ctx)
1890 struct iavf_sc *sc = iflib_get_softc(ctx);
1896 iavf_config_rss_reg(struct iavf_sc *sc)
1898 struct i40e_hw *hw = &sc->hw;
1899 struct ixl_vsi *vsi = &sc->vsi;
1901 u64 set_hena = 0, hena;
1903 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1905 u32 rss_hash_config;
1908 /* Don't set up RSS if using a single queue */
1909 if (vsi->num_rx_queues == 1) {
1910 wr32(hw, I40E_VFQF_HENA(0), 0);
1911 wr32(hw, I40E_VFQF_HENA(1), 0);
1917 /* Fetch the configured RSS key */
1918 rss_getkey((uint8_t *) &rss_seed);
1920 ixl_get_default_rss_key(rss_seed);
1923 /* Fill out hash function seed */
1924 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1925 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
1927 /* Enable PCTYPES for RSS: */
1929 rss_hash_config = rss_gethashconfig();
1930 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1931 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1932 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1933 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1934 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1935 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1936 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1937 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1938 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1939 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1940 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1941 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1942 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1943 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1945 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1947 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
1948 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
1950 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
1951 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
1953 /* Populate the LUT with max no. of queues in round robin fashion */
1954 for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
1955 if (j == vsi->num_rx_queues)
1959 * Fetch the RSS bucket id for the given indirection entry.
1960 * Cap it at the number of configured buckets (which is
1963 que_id = rss_get_indirection_to_bucket(i);
1964 que_id = que_id % vsi->num_rx_queues;
1968 /* lut = 4-byte sliding window of 4 lut entries */
1969 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
1970 /* On i = 3, we have 4 entries in lut; write to the register */
1972 wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
1973 DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
1980 iavf_config_rss_pf(struct iavf_sc *sc)
1982 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);
1984 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);
1986 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
1990 ** iavf_config_rss - setup RSS
1992 ** RSS keys and table are cleared on VF reset.
1995 iavf_config_rss(struct iavf_sc *sc)
1997 if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
1998 iavf_dbg_info(sc, "Setting up RSS using VF registers...");
1999 iavf_config_rss_reg(sc);
2000 } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2001 iavf_dbg_info(sc, "Setting up RSS using messages to PF...");
2002 iavf_config_rss_pf(sc);
2004 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
2008 ** This routine adds new MAC filters to the sc's list;
2009 ** these are later added in hardware by sending a virtual
2013 iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
2015 struct iavf_mac_filter *f;
2017 /* Does one already exist? */
2018 f = iavf_find_mac_filter(sc, macaddr);
2020 iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
2021 MAC_FORMAT_ARGS(macaddr));
2025 /* If not, get a new empty filter */
2026 f = iavf_get_mac_filter(sc);
2028 device_printf(sc->dev, "%s: no filters available!!\n",
2033 iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
2034 MAC_FORMAT_ARGS(macaddr));
2036 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2037 f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2043 ** Marks a MAC filter for deletion.
2046 iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
2048 struct iavf_mac_filter *f;
2050 f = iavf_find_mac_filter(sc, macaddr);
2054 f->flags |= IXL_FILTER_DEL;
2059 * Re-uses the name from the PF driver.
2062 iavf_add_device_sysctls(struct iavf_sc *sc)
2064 struct ixl_vsi *vsi = &sc->vsi;
2065 device_t dev = sc->dev;
2067 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2068 struct sysctl_oid_list *ctx_list =
2069 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2070 struct sysctl_oid *debug_node;
2071 struct sysctl_oid_list *debug_list;
2073 SYSCTL_ADD_PROC(ctx, ctx_list,
2074 OID_AUTO, "current_speed",
2075 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2076 sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");
2078 SYSCTL_ADD_PROC(ctx, ctx_list,
2080 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2081 sc, 0, iavf_sysctl_tx_itr, "I",
2082 "Immediately set TX ITR value for all queues");
2084 SYSCTL_ADD_PROC(ctx, ctx_list,
2086 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2087 sc, 0, iavf_sysctl_rx_itr, "I",
2088 "Immediately set RX ITR value for all queues");
2090 /* Add sysctls meant to print debug information, but don't list them
2091 * in "sysctl -a" output. */
2092 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2093 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_NEEDGIANT,
2094 NULL, "Debug Sysctls");
2095 debug_list = SYSCTL_CHILDREN(debug_node);
2097 SYSCTL_ADD_UINT(ctx, debug_list,
2098 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2099 &sc->hw.debug_mask, 0, "Shared code debug message level");
2101 SYSCTL_ADD_UINT(ctx, debug_list,
2102 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2103 &sc->dbg_mask, 0, "Non-shared code debug message level");
2105 SYSCTL_ADD_PROC(ctx, debug_list,
2106 OID_AUTO, "filter_list",
2107 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2108 sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");
2110 SYSCTL_ADD_PROC(ctx, debug_list,
2111 OID_AUTO, "queue_interrupt_table",
2112 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2113 sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2115 SYSCTL_ADD_PROC(ctx, debug_list,
2116 OID_AUTO, "do_vf_reset",
2117 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2118 sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");
2120 SYSCTL_ADD_PROC(ctx, debug_list,
2121 OID_AUTO, "do_vflr_reset",
2122 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2123 sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
2125 /* Add stats sysctls */
2126 ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
2127 ixl_vsi_add_queues_stats(vsi, ctx);
2132 iavf_init_filters(struct iavf_sc *sc)
2134 sc->mac_filters = malloc(sizeof(struct mac_list),
2135 M_IAVF, M_WAITOK | M_ZERO);
2136 SLIST_INIT(sc->mac_filters);
2137 sc->vlan_filters = malloc(sizeof(struct vlan_list),
2138 M_IAVF, M_WAITOK | M_ZERO);
2139 SLIST_INIT(sc->vlan_filters);
2143 iavf_free_filters(struct iavf_sc *sc)
2145 struct iavf_mac_filter *f;
2146 struct iavf_vlan_filter *v;
2148 while (!SLIST_EMPTY(sc->mac_filters)) {
2149 f = SLIST_FIRST(sc->mac_filters);
2150 SLIST_REMOVE_HEAD(sc->mac_filters, next);
2153 free(sc->mac_filters, M_IAVF);
2154 while (!SLIST_EMPTY(sc->vlan_filters)) {
2155 v = SLIST_FIRST(sc->vlan_filters);
2156 SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2159 free(sc->vlan_filters, M_IAVF);
2163 iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
2177 switch (link_speed) {
2178 case VIRTCHNL_LINK_SPEED_100MB:
2181 case VIRTCHNL_LINK_SPEED_1GB:
2184 case VIRTCHNL_LINK_SPEED_10GB:
2187 case VIRTCHNL_LINK_SPEED_40GB:
2190 case VIRTCHNL_LINK_SPEED_20GB:
2193 case VIRTCHNL_LINK_SPEED_25GB:
2196 case VIRTCHNL_LINK_SPEED_UNKNOWN:
2202 return speeds[index];
2206 iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2208 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2211 error = sysctl_handle_string(oidp,
2212 iavf_vc_speed_to_string(sc->link_speed),
2218 * Sanity check and save off tunable values.
2221 iavf_save_tunables(struct iavf_sc *sc)
2223 device_t dev = sc->dev;
2225 /* Save tunable information */
2226 sc->dbg_mask = iavf_core_debug_mask;
2227 sc->hw.debug_mask = iavf_shared_debug_mask;
2228 sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);
2230 if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) {
2231 device_printf(dev, "Invalid tx_itr value of %d set!\n",
2233 device_printf(dev, "tx_itr must be between %d and %d, "
2236 device_printf(dev, "Using default value of %d instead\n",
2238 sc->tx_itr = IXL_ITR_4K;
2240 sc->tx_itr = iavf_tx_itr;
2242 if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) {
2243 device_printf(dev, "Invalid rx_itr value of %d set!\n",
2245 device_printf(dev, "rx_itr must be between %d and %d, "
2248 device_printf(dev, "Using default value of %d instead\n",
2250 sc->rx_itr = IXL_ITR_8K;
2252 sc->rx_itr = iavf_rx_itr;
2256 * Used to set the Tx ITR value for all of the VF's queues.
2257 * Writes to the ITR registers immediately.
2260 iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
2262 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2263 device_t dev = sc->dev;
2264 int requested_tx_itr;
2267 requested_tx_itr = sc->tx_itr;
2268 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
2269 if ((error) || (req->newptr == NULL))
2271 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2273 "Invalid TX itr value; value must be between 0 and %d\n",
2278 sc->tx_itr = requested_tx_itr;
2279 iavf_configure_tx_itr(sc);
2285 * Used to set the Rx ITR value for all of the VF's queues.
2286 * Writes to the ITR registers immediately.
2289 iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
2291 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2292 device_t dev = sc->dev;
2293 int requested_rx_itr;
2296 requested_rx_itr = sc->rx_itr;
2297 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
2298 if ((error) || (req->newptr == NULL))
2300 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2302 "Invalid RX itr value; value must be between 0 and %d\n",
2307 sc->rx_itr = requested_rx_itr;
2308 iavf_configure_rx_itr(sc);
2314 iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
2316 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2317 struct iavf_mac_filter *f;
2318 struct iavf_vlan_filter *v;
2319 device_t dev = sc->dev;
2320 int ftl_len, ftl_counter = 0, error = 0;
2323 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2325 device_printf(dev, "Could not allocate sbuf for output.\n");
2329 sbuf_printf(buf, "\n");
2331 /* Print MAC filters */
2332 sbuf_printf(buf, "MAC Filters:\n");
2334 SLIST_FOREACH(f, sc->mac_filters, next)
2337 sbuf_printf(buf, "(none)\n");
2339 SLIST_FOREACH(f, sc->mac_filters, next) {
2341 MAC_FORMAT ", flags %#06x\n",
2342 MAC_FORMAT_ARGS(f->macaddr), f->flags);
2346 /* Print VLAN filters */
2347 sbuf_printf(buf, "VLAN Filters:\n");
2349 SLIST_FOREACH(v, sc->vlan_filters, next)
2352 sbuf_printf(buf, "(none)");
2354 SLIST_FOREACH(v, sc->vlan_filters, next) {
2358 /* don't print '\n' for last entry */
2359 if (++ftl_counter != ftl_len)
2360 sbuf_printf(buf, "\n");
2364 error = sbuf_finish(buf);
2366 device_printf(dev, "Error finishing sbuf: %d\n", error);
2373 * Print out mapping of TX queue indexes and Rx queue indexes
2377 iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
2379 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2380 struct ixl_vsi *vsi = &sc->vsi;
2381 device_t dev = sc->dev;
2385 struct ixl_rx_queue *rx_que = vsi->rx_queues;
2386 struct ixl_tx_queue *tx_que = vsi->tx_queues;
2388 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2390 device_printf(dev, "Could not allocate sbuf for output.\n");
2394 sbuf_cat(buf, "\n");
2395 for (int i = 0; i < vsi->num_rx_queues; i++) {
2396 rx_que = &vsi->rx_queues[i];
2397 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
2399 for (int i = 0; i < vsi->num_tx_queues; i++) {
2400 tx_que = &vsi->tx_queues[i];
2401 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
2404 error = sbuf_finish(buf);
2406 device_printf(dev, "Error finishing sbuf: %d\n", error);
2412 #define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
2414 iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
2416 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2417 int do_reset = 0, error = 0;
2419 error = sysctl_handle_int(oidp, &do_reset, 0, req);
2420 if ((error) || (req->newptr == NULL))
2423 if (do_reset == 1) {
2425 if (CTX_ACTIVE(sc->vsi.ctx))
2426 iflib_request_reset(sc->vsi.ctx);
2433 iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
2435 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2436 device_t dev = sc->dev;
2437 int do_reset = 0, error = 0;
2439 error = sysctl_handle_int(oidp, &do_reset, 0, req);
2440 if ((error) || (req->newptr == NULL))
2443 if (do_reset == 1) {
2444 if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
2445 device_printf(dev, "PCIE FLR failed\n");
2448 else if (CTX_ACTIVE(sc->vsi.ctx))
2449 iflib_request_reset(sc->vsi.ctx);