1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 /*********************************************************************
39 *********************************************************************/
40 #define IAVF_DRIVER_VERSION_MAJOR 2
41 #define IAVF_DRIVER_VERSION_MINOR 0
42 #define IAVF_DRIVER_VERSION_BUILD 0
44 #define IAVF_DRIVER_VERSION_STRING \
45 __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "." \
46 __XSTRING(IAVF_DRIVER_VERSION_MINOR) "." \
47 __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k"
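/*
 * With the version numbers above, __XSTRING() stringifies each component,
 * so IAVF_DRIVER_VERSION_STRING expands to the literal "2.0.0-k".
 */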
49 /*********************************************************************
52 * Used by probe to select devices to load on
54 * ( Vendor ID, Device ID, Branding String )
55 *********************************************************************/
static pci_vendor_info_t iavf_vendor_info_array[] =
{
59 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"),
60 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"),
61 PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"),
	/* required last entry */
	PVID_END
};
66 /*********************************************************************
68 *********************************************************************/
69 static void *iavf_register(device_t dev);
70 static int iavf_if_attach_pre(if_ctx_t ctx);
71 static int iavf_if_attach_post(if_ctx_t ctx);
72 static int iavf_if_detach(if_ctx_t ctx);
73 static int iavf_if_shutdown(if_ctx_t ctx);
74 static int iavf_if_suspend(if_ctx_t ctx);
75 static int iavf_if_resume(if_ctx_t ctx);
76 static int iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
77 static void iavf_if_enable_intr(if_ctx_t ctx);
78 static void iavf_if_disable_intr(if_ctx_t ctx);
79 static int iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
80 static int iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
81 static int iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
82 static int iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
83 static void iavf_if_queues_free(if_ctx_t ctx);
84 static void iavf_if_update_admin_status(if_ctx_t ctx);
85 static void iavf_if_multi_set(if_ctx_t ctx);
86 static int iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
87 static void iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
88 static int iavf_if_media_change(if_ctx_t ctx);
89 static int iavf_if_promisc_set(if_ctx_t ctx, int flags);
90 static void iavf_if_timer(if_ctx_t ctx, uint16_t qid);
91 static void iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
92 static void iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
93 static uint64_t iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
94 static void iavf_if_stop(if_ctx_t ctx);
96 static int iavf_allocate_pci_resources(struct iavf_sc *);
97 static int iavf_reset_complete(struct i40e_hw *);
98 static int iavf_setup_vc(struct iavf_sc *);
99 static int iavf_reset(struct iavf_sc *);
100 static int iavf_vf_config(struct iavf_sc *);
101 static void iavf_init_filters(struct iavf_sc *);
102 static void iavf_free_pci_resources(struct iavf_sc *);
103 static void iavf_free_filters(struct iavf_sc *);
104 static void iavf_setup_interface(device_t, struct iavf_sc *);
105 static void iavf_add_device_sysctls(struct iavf_sc *);
106 static void iavf_enable_adminq_irq(struct i40e_hw *);
107 static void iavf_disable_adminq_irq(struct i40e_hw *);
108 static void iavf_enable_queue_irq(struct i40e_hw *, int);
109 static void iavf_disable_queue_irq(struct i40e_hw *, int);
110 static void iavf_config_rss(struct iavf_sc *);
111 static void iavf_stop(struct iavf_sc *);
113 static int iavf_add_mac_filter(struct iavf_sc *, u8 *, u16);
114 static int iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
115 static int iavf_msix_que(void *);
116 static int iavf_msix_adminq(void *);
117 //static void iavf_del_multi(struct iavf_sc *sc);
118 static void iavf_init_multi(struct iavf_sc *sc);
119 static void iavf_configure_itr(struct iavf_sc *sc);
121 static int iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
122 static int iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
123 static int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
124 static int iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
125 static int iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
126 static int iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
127 static int iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
129 static void iavf_save_tunables(struct iavf_sc *);
130 static enum i40e_status_code
131 iavf_process_adminq(struct iavf_sc *, u16 *);
132 static int iavf_send_vc_msg(struct iavf_sc *sc, u32 op);
133 static int iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op);
135 /*********************************************************************
136 * FreeBSD Device Interface Entry Points
137 *********************************************************************/
139 static device_method_t iavf_methods[] = {
140 /* Device interface */
141 DEVMETHOD(device_register, iavf_register),
142 DEVMETHOD(device_probe, iflib_device_probe),
143 DEVMETHOD(device_attach, iflib_device_attach),
144 DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};
149 static driver_t iavf_driver = {
150 "iavf", iavf_methods, sizeof(struct iavf_sc),
153 devclass_t iavf_devclass;
154 DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0);
155 MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
156 pci, iavf, iavf_vendor_info_array,
157 nitems(iavf_vendor_info_array) - 1);
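/*
 * The "- 1" above keeps the PVID_END sentinel entry out of the exported
 * PNP match table.
 */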
158 MODULE_VERSION(iavf, 1);
160 MODULE_DEPEND(iavf, pci, 1, 1, 1);
161 MODULE_DEPEND(iavf, ether, 1, 1, 1);
162 MODULE_DEPEND(iavf, iflib, 1, 1, 1);
164 MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
166 static device_method_t iavf_if_methods[] = {
167 DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
168 DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
169 DEVMETHOD(ifdi_detach, iavf_if_detach),
170 DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
171 DEVMETHOD(ifdi_suspend, iavf_if_suspend),
172 DEVMETHOD(ifdi_resume, iavf_if_resume),
173 DEVMETHOD(ifdi_init, iavf_if_init),
174 DEVMETHOD(ifdi_stop, iavf_if_stop),
175 DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
176 DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
177 DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
178 DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
179 DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
180 DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
181 DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
182 DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
183 DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
184 DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
185 DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
186 DEVMETHOD(ifdi_media_status, iavf_if_media_status),
187 DEVMETHOD(ifdi_media_change, iavf_if_media_change),
188 DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
189 DEVMETHOD(ifdi_timer, iavf_if_timer),
190 DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
191 DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD_END
};
196 static driver_t iavf_if_driver = {
197 "iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
/*
** TUNEABLE PARAMETERS:
*/
204 static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0,
205 "iavf driver parameters");
/*
 * Different method for processing TX descriptor
 * completion.
 */
211 static int iavf_enable_head_writeback = 0;
212 TUNABLE_INT("hw.iavf.enable_head_writeback",
213 &iavf_enable_head_writeback);
214 SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
215 &iavf_enable_head_writeback, 0,
216 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
218 static int iavf_core_debug_mask = 0;
219 TUNABLE_INT("hw.iavf.core_debug_mask",
220 &iavf_core_debug_mask);
221 SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
222 &iavf_core_debug_mask, 0,
223 "Display debug statements that are printed in non-shared code");
225 static int iavf_shared_debug_mask = 0;
226 TUNABLE_INT("hw.iavf.shared_debug_mask",
227 &iavf_shared_debug_mask);
228 SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
229 &iavf_shared_debug_mask, 0,
230 "Display debug statements that are printed in shared code");
232 int iavf_rx_itr = IXL_ITR_8K;
233 TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr);
234 SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
235 &iavf_rx_itr, 0, "RX Interrupt Rate");
237 int iavf_tx_itr = IXL_ITR_4K;
238 TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr);
239 SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
240 &iavf_tx_itr, 0, "TX Interrupt Rate");
242 extern struct if_txrx ixl_txrx_hwb;
243 extern struct if_txrx ixl_txrx_dwb;
245 static struct if_shared_ctx iavf_sctx_init = {
246 .isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
248 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
249 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
250 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
251 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
252 .isc_rx_maxsize = 16384,
253 .isc_rx_nsegments = IXL_MAX_RX_SEGS,
254 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
259 .isc_admin_intrcnt = 1,
260 .isc_vendor_info = iavf_vendor_info_array,
261 .isc_driver_version = IAVF_DRIVER_VERSION_STRING,
262 .isc_driver = &iavf_if_driver,
263 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,
265 .isc_nrxd_min = {IXL_MIN_RING},
266 .isc_ntxd_min = {IXL_MIN_RING},
267 .isc_nrxd_max = {IXL_MAX_RING},
268 .isc_ntxd_max = {IXL_MAX_RING},
269 .isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};
273 if_shared_ctx_t iavf_sctx = &iavf_sctx_init;
277 iavf_register(device_t dev)
283 iavf_allocate_pci_resources(struct iavf_sc *sc)
285 struct i40e_hw *hw = &sc->hw;
286 device_t dev = iflib_get_dev(sc->vsi.ctx);
	int rid = PCIR_BAR(0);

	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
294 if (!(sc->pci_mem)) {
295 device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
299 /* Save off the PCI information */
300 hw->vendor_id = pci_get_vendor(dev);
301 hw->device_id = pci_get_device(dev);
302 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
303 hw->subsystem_vendor_id =
304 pci_read_config(dev, PCIR_SUBVEND_0, 2);
305 hw->subsystem_device_id =
306 pci_read_config(dev, PCIR_SUBDEV_0, 2);
308 hw->bus.device = pci_get_slot(dev);
309 hw->bus.func = pci_get_function(dev);
311 /* Save off register access information */
312 sc->osdep.mem_bus_space_tag =
313 rman_get_bustag(sc->pci_mem);
314 sc->osdep.mem_bus_space_handle =
315 rman_get_bushandle(sc->pci_mem);
316 sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
317 sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
320 sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
321 sc->hw.back = &sc->osdep;
327 iavf_if_attach_pre(if_ctx_t ctx)
	device_t dev;
	struct iavf_sc *sc;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	if_softc_ctx_t scctx;
	int error = 0;
336 dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);

	vsi = &sc->vsi;
	vsi->ctx = ctx;
	sc->dev = dev;
	hw = &sc->hw;
348 vsi->media = iflib_get_media(ctx);
349 vsi->shared = scctx = iflib_get_softc_ctx(ctx);
351 iavf_save_tunables(sc);
353 /* Do PCI setup - map BAR0, etc */
354 if (iavf_allocate_pci_resources(sc)) {
355 device_printf(dev, "%s: Allocation of PCI resources failed\n",
361 iavf_dbg_init(sc, "Allocated PCI resources and MSIX vectors\n");
	/*
	 * XXX: This is called by init_shared_code in the PF driver,
	 * but the rest of that function does not support VFs.
	 */
367 error = i40e_set_mac_type(hw);
369 device_printf(dev, "%s: set_mac_type failed: %d\n",
374 error = iavf_reset_complete(hw);
376 device_printf(dev, "%s: Device is still being reset\n",
381 iavf_dbg_init(sc, "VF Device is ready for configuration\n");
383 /* Sets up Admin Queue */
384 error = iavf_setup_vc(sc);
386 device_printf(dev, "%s: Error setting up PF comms, %d\n",
391 iavf_dbg_init(sc, "PF API version verified\n");
393 /* Need API version before sending reset message */
394 error = iavf_reset(sc);
396 device_printf(dev, "VF reset failed; reload the driver\n");
400 iavf_dbg_init(sc, "VF reset complete\n");
402 /* Ask for VF config from PF */
403 error = iavf_vf_config(sc);
405 device_printf(dev, "Error getting configuration from PF: %d\n",
410 device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
411 sc->vf_res->num_vsis,
412 sc->vf_res->num_queue_pairs,
413 sc->vf_res->max_vectors,
414 sc->vf_res->rss_key_size,
415 sc->vf_res->rss_lut_size);
416 iavf_dbg_info(sc, "Capabilities=%b\n",
417 sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
419 /* got VF config message back from PF, now we can parse it */
420 for (int i = 0; i < sc->vf_res->num_vsis; i++) {
421 /* XXX: We only use the first VSI we find */
422 if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
423 sc->vsi_res = &sc->vf_res->vsi_res[i];
426 device_printf(dev, "%s: no LAN VSI found\n", __func__);
430 vsi->id = sc->vsi_res->vsi_id;
432 iavf_dbg_init(sc, "Resource Acquisition complete\n");
	/* If no MAC address was assigned, just make a random one */
435 if (!iavf_check_ether_addr(hw->mac.addr)) {
436 u8 addr[ETHER_ADDR_LEN];
437 arc4rand(&addr, sizeof(addr), 0);
440 bcopy(addr, hw->mac.addr, sizeof(addr));
442 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
443 iflib_set_mac(ctx, hw->mac.addr);
445 /* Allocate filter lists */
446 iavf_init_filters(sc);
448 /* Fill out more iflib parameters */
449 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
450 sc->vsi_res->num_queue_pairs;
451 if (vsi->enable_head_writeback) {
452 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
453 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
454 scctx->isc_txrx = &ixl_txrx_hwb;
456 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
457 * sizeof(struct i40e_tx_desc), DBA_ALIGN);
458 scctx->isc_txrx = &ixl_txrx_dwb;
460 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
461 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
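	/*
	 * Sizing illustration (assuming the default ring size): with
	 * isc_nrxd[0] == 1024 descriptors of 32 bytes each, the RX ring
	 * occupies 32 KB, which roundup2() leaves unchanged because it is
	 * already a multiple of DBA_ALIGN.
	 */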
462 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
463 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
464 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
465 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
466 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
467 scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE;
468 scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
469 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
err_res_buf:
	free(sc->vf_res, M_IAVF);
err_aq:
	i40e_shutdown_adminq(hw);
err_pci_res:
	iavf_free_pci_resources(sc);
err_early:
	return (error);
484 iavf_if_attach_post(if_ctx_t ctx)
492 INIT_DBG_DEV(dev, "begin");
494 dev = iflib_get_dev(ctx);
495 sc = iflib_get_softc(ctx);
	vsi = &sc->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &sc->hw;
500 /* Save off determined number of queues for interface */
501 vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
502 vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
504 /* Setup the stack interface */
505 iavf_setup_interface(dev, sc);
507 INIT_DBG_DEV(dev, "Interface setup complete");
509 /* Initialize statistics & add sysctls */
510 bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
511 iavf_add_device_sysctls(sc);
513 sc->init_state = IAVF_INIT_READY;
514 atomic_store_rel_32(&sc->queues_enabled, 0);
516 /* We want AQ enabled early for init */
517 iavf_enable_adminq_irq(hw);
519 INIT_DBG_DEV(dev, "end");
525 * XXX: iflib always ignores the return value of detach()
526 * -> This means that this isn't allowed to fail
529 iavf_if_detach(if_ctx_t ctx)
531 struct iavf_sc *sc = iflib_get_softc(ctx);
532 struct ixl_vsi *vsi = &sc->vsi;
533 struct i40e_hw *hw = &sc->hw;
534 device_t dev = sc->dev;
535 enum i40e_status_code status;
537 INIT_DBG_DEV(dev, "begin");
539 /* Remove all the media and link information */
540 ifmedia_removeall(vsi->media);
542 iavf_disable_adminq_irq(hw);
543 status = i40e_shutdown_adminq(&sc->hw);
544 if (status != I40E_SUCCESS) {
546 "i40e_shutdown_adminq() failed with status %s\n",
547 i40e_stat_str(hw, status));
550 free(sc->vf_res, M_IAVF);
551 iavf_free_pci_resources(sc);
552 iavf_free_filters(sc);
554 INIT_DBG_DEV(dev, "end");
559 iavf_if_shutdown(if_ctx_t ctx)
565 iavf_if_suspend(if_ctx_t ctx)
571 iavf_if_resume(if_ctx_t ctx)
577 iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
580 if_ctx_t ctx = sc->vsi.ctx;
	error = ixl_vc_send_cmd(sc, op);
	if (error != 0) {
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
		return (error);
	}
588 /* Don't wait for a response if the device is being detached. */
589 if (!iflib_in_detach(ctx)) {
590 iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
591 error = sx_sleep(ixl_vc_get_op_chan(sc, op),
592 iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);
594 if (error == EWOULDBLOCK)
595 device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
602 iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
	error = ixl_vc_send_cmd(sc, op);
	if (error != 0)
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);

	return (error);
614 iavf_init_queues(struct ixl_vsi *vsi)
616 if_softc_ctx_t scctx = vsi->shared;
617 struct ixl_tx_queue *tx_que = vsi->tx_queues;
618 struct ixl_rx_queue *rx_que = vsi->rx_queues;
621 for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
622 ixl_init_tx_ring(vsi, tx_que);
624 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
627 if (scctx->isc_max_frame_size <= MCLBYTES)
628 rxr->mbuf_sz = MCLBYTES;
630 rxr->mbuf_sz = MJUMPAGESIZE;
632 wr32(vsi->hw, rxr->tail, 0);
637 iavf_if_init(if_ctx_t ctx)
639 struct iavf_sc *sc = iflib_get_softc(ctx);
640 struct ixl_vsi *vsi = &sc->vsi;
641 struct i40e_hw *hw = &sc->hw;
642 struct ifnet *ifp = iflib_get_ifp(ctx);
643 u8 tmpaddr[ETHER_ADDR_LEN];
646 INIT_DBG_IF(ifp, "begin");
648 MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));
650 error = iavf_reset_complete(hw);
652 device_printf(sc->dev, "%s: VF reset failed\n",
656 if (!i40e_check_asq_alive(hw)) {
657 iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
658 pci_enable_busmaster(sc->dev);
659 i40e_shutdown_adminq(hw);
660 i40e_init_adminq(hw);
663 /* Make sure queues are disabled */
664 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
666 bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
667 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
668 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
669 error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}
676 error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
677 if (!error || error == EEXIST)
678 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
679 iflib_set_mac(ctx, hw->mac.addr);
681 /* Prepare the queues for operation */
682 iavf_init_queues(vsi);
684 /* Set initial ITR values */
685 iavf_configure_itr(sc);
687 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);
	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);
695 /* Init SW TX ring indices */
696 if (vsi->enable_head_writeback)
697 ixl_init_tx_cidx(vsi);
699 ixl_init_tx_rsqs(vsi);
701 /* Configure promiscuous mode */
702 iavf_if_promisc_set(ctx, if_getflags(ifp));
705 iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);
707 sc->init_state = IAVF_RUNNING;
 * iavf_attach() helper function; initializes the admin queue
 * and attempts to establish contact with the PF by
 * retrying the initial "API version" message until the
 * PF responds or the retry limit is reached.
717 iavf_setup_vc(struct iavf_sc *sc)
719 struct i40e_hw *hw = &sc->hw;
720 device_t dev = sc->dev;
721 int error = 0, ret_error = 0, asq_retries = 0;
	bool send_api_ver_retried = false;
	/* Need to set these AQ parameters before initializing AQ */
725 hw->aq.num_arq_entries = IXL_AQ_LEN;
726 hw->aq.num_asq_entries = IXL_AQ_LEN;
727 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
728 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
730 for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) {
731 /* Initialize admin queue */
732 error = i40e_init_adminq(hw);
734 device_printf(dev, "%s: init_adminq failed: %d\n",
740 iavf_dbg_init(sc, "Initialized Admin Queue; starting"
741 " send_api_ver attempt %d", i+1);
744 /* Send VF's API version */
745 error = iavf_send_api_ver(sc);
747 i40e_shutdown_adminq(hw);
749 device_printf(dev, "%s: unable to send api"
750 " version to PF on attempt %d, error %d\n",
751 __func__, i+1, error);
755 while (!i40e_asq_done(hw)) {
756 if (++asq_retries > IAVF_AQ_MAX_ERR) {
757 i40e_shutdown_adminq(hw);
758 device_printf(dev, "Admin Queue timeout "
759 "(waiting for send_api_ver), %d more tries...\n",
760 IAVF_AQ_MAX_ERR - (i + 1));
766 if (asq_retries > IAVF_AQ_MAX_ERR)
769 iavf_dbg_init(sc, "Sent API version message to PF");
771 /* Verify that the VF accepts the PF's API version */
772 error = iavf_verify_api_ver(sc);
773 if (error == ETIMEDOUT) {
774 if (!send_api_ver_retried) {
775 /* Resend message, one more time */
776 send_api_ver_retried = true;
778 "%s: Timeout while verifying API version on first"
779 " try!\n", __func__);
783 "%s: Timeout while verifying API version on second"
784 " try!\n", __func__);
791 "%s: Unable to verify API version,"
792 " error %s\n", __func__, i40e_stat_str(hw, error));
799 i40e_shutdown_adminq(hw);
804 * iavf_attach() helper function; asks the PF for this VF's
805 * configuration, and saves the information if it receives it.
808 iavf_vf_config(struct iavf_sc *sc)
810 struct i40e_hw *hw = &sc->hw;
811 device_t dev = sc->dev;
812 int bufsz, error = 0, ret_error = 0;
813 int asq_retries, retried = 0;
816 error = iavf_send_vf_config_msg(sc);
819 "%s: Unable to send VF config request, attempt %d,"
820 " error %d\n", __func__, retried + 1, error);
825 while (!i40e_asq_done(hw)) {
826 if (++asq_retries > IAVF_AQ_MAX_ERR) {
827 device_printf(dev, "%s: Admin Queue timeout "
828 "(waiting for send_vf_config_msg), attempt %d\n",
829 __func__, retried + 1);
836 iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n",
840 bufsz = sizeof(struct virtchnl_vf_resource) +
841 (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
842 sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT);
845 "%s: Unable to allocate memory for VF configuration"
846 " message from PF on attempt %d\n", __func__, retried + 1);
852 /* Check for VF config response */
853 error = iavf_get_vf_config(sc);
854 if (error == ETIMEDOUT) {
855 /* The 1st time we timeout, send the configuration message again */
861 "%s: iavf_get_vf_config() timed out waiting for a response\n",
866 "%s: Unable to get VF configuration from PF after %d tries!\n",
867 __func__, retried + 1);
fail:
	free(sc->vf_res, M_IAVF);
879 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix)
881 struct iavf_sc *sc = iflib_get_softc(ctx);
882 struct ixl_vsi *vsi = &sc->vsi;
883 struct ixl_rx_queue *rx_que = vsi->rx_queues;
884 struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];
888 MPASS(vsi->shared->isc_nrxqsets > 0);
889 MPASS(vsi->shared->isc_ntxqsets > 0);
	/* Admin Queue is vector 0 */
	rid = vector + 1;
893 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
894 iavf_msix_adminq, sc, 0, "aq");
896 iflib_irq_free(ctx, &vsi->irq);
897 device_printf(iflib_get_dev(ctx),
898 "Failed to register Admin Que handler");
902 /* Now set up the stations */
903 for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
906 snprintf(buf, sizeof(buf), "rxq%d", i);
907 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
908 IFLIB_INTR_RX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer
		 * RX queues than what the iflib context expects? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
913 "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
917 rx_que->msix = vector;
920 bzero(buf, sizeof(buf));
922 for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
923 snprintf(buf, sizeof(buf), "txq%d", i);
924 iflib_softirq_alloc_generic(ctx,
925 &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
926 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
928 /* TODO: Maybe call a strategy function for this to figure out which
929 * interrupts to map Tx queues to. I don't know if there's an immediately
930 * better way than this other than a user-supplied map, though. */
931 tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
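		/*
		 * Example of the modulo mapping above: with 4 RX vectors and
		 * 8 TX queues, txq0-txq3 use vectors 1-4 and txq4-txq7 wrap
		 * around to those same vectors.
		 */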
936 iflib_irq_free(ctx, &vsi->irq);
937 rx_que = vsi->rx_queues;
938 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
939 iflib_irq_free(ctx, &rx_que->que_irq);
943 /* Enable all interrupts */
945 iavf_if_enable_intr(if_ctx_t ctx)
947 struct iavf_sc *sc = iflib_get_softc(ctx);
948 struct ixl_vsi *vsi = &sc->vsi;
950 iavf_enable_intr(vsi);
953 /* Disable all interrupts */
955 iavf_if_disable_intr(if_ctx_t ctx)
957 struct iavf_sc *sc = iflib_get_softc(ctx);
958 struct ixl_vsi *vsi = &sc->vsi;
960 iavf_disable_intr(vsi);
964 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
966 struct iavf_sc *sc = iflib_get_softc(ctx);
967 struct ixl_vsi *vsi = &sc->vsi;
968 struct i40e_hw *hw = vsi->hw;
969 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
971 iavf_enable_queue_irq(hw, rx_que->msix - 1);
976 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
978 struct iavf_sc *sc = iflib_get_softc(ctx);
979 struct ixl_vsi *vsi = &sc->vsi;
980 struct i40e_hw *hw = vsi->hw;
981 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
983 iavf_enable_queue_irq(hw, tx_que->msix - 1);
988 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
990 struct iavf_sc *sc = iflib_get_softc(ctx);
991 struct ixl_vsi *vsi = &sc->vsi;
992 if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;
996 MPASS(scctx->isc_ntxqsets > 0);
998 MPASS(scctx->isc_ntxqsets == ntxqsets);
1000 /* Allocate queue structure memory */
1001 if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1003 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1007 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1008 struct tx_ring *txr = &que->txr;
1013 if (!vsi->enable_head_writeback) {
1014 /* Allocate report status array */
1015 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
1016 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1020 /* Init report status array */
1021 for (j = 0; j < scctx->isc_ntxd[0]; j++)
1022 txr->tx_rsq[j] = QIDX_INVALID;
1024 /* get the virtual and physical address of the hardware queues */
1025 txr->tail = I40E_QTX_TAIL1(txr->me);
1026 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1027 txr->tx_paddr = paddrs[i * ntxqs];
	}

	return (0);

err:
	iavf_if_queues_free(ctx);
	return (error);
1038 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1040 struct iavf_sc *sc = iflib_get_softc(ctx);
1041 struct ixl_vsi *vsi = &sc->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;
1046 if_softc_ctx_t scctx = vsi->shared;
1047 MPASS(scctx->isc_nrxqsets > 0);
1049 MPASS(scctx->isc_nrxqsets == nrxqsets);
1052 /* Allocate queue structure memory */
1053 if (!(vsi->rx_queues =
1054 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1055 nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1056 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1061 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1062 struct rx_ring *rxr = &que->rxr;
1067 /* get the virtual and physical address of the hardware queues */
1068 rxr->tail = I40E_QRX_TAIL1(rxr->me);
1069 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1070 rxr->rx_paddr = paddrs[i * nrxqs];
	}

	return (0);

fail:
	iavf_if_queues_free(ctx);
	return (error);
1081 iavf_if_queues_free(if_ctx_t ctx)
1083 struct iavf_sc *sc = iflib_get_softc(ctx);
1084 struct ixl_vsi *vsi = &sc->vsi;
1086 if (!vsi->enable_head_writeback) {
1087 struct ixl_tx_queue *que;
1090 for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
1091 struct tx_ring *txr = &que->txr;
1092 if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IAVF);
				txr->tx_rsq = NULL;
			}
		}
	}

1099 if (vsi->tx_queues != NULL) {
1100 free(vsi->tx_queues, M_IAVF);
1101 vsi->tx_queues = NULL;
1103 if (vsi->rx_queues != NULL) {
1104 free(vsi->rx_queues, M_IAVF);
1105 vsi->rx_queues = NULL;
1110 iavf_check_aq_errors(struct iavf_sc *sc)
1112 struct i40e_hw *hw = &sc->hw;
1113 device_t dev = sc->dev;
	u32 reg, oldreg;
	u8 aq_error = false;
1117 /* check for Admin queue errors */
1118 oldreg = reg = rd32(hw, hw->aq.arq.len);
1119 if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
1120 device_printf(dev, "ARQ VF Error detected\n");
1121 reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
1124 if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
1125 device_printf(dev, "ARQ Overflow Error detected\n");
1126 reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
1129 if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
1130 device_printf(dev, "ARQ Critical Error detected\n");
1131 reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);
1137 oldreg = reg = rd32(hw, hw->aq.asq.len);
1138 if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
1139 device_printf(dev, "ASQ VF Error detected\n");
1140 reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
1143 if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
1144 device_printf(dev, "ASQ Overflow Error detected\n");
1145 reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
1148 if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
1149 device_printf(dev, "ASQ Critical Error detected\n");
1150 reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);
1157 device_printf(dev, "WARNING: Stopping VF!\n");
1159 * A VF reset might not be enough to fix a problem here;
1160 * a PF reset could be required.
1162 sc->init_state = IAVF_RESET_REQUIRED;
		iavf_request_reset(sc);
	}
1167 return (aq_error ? EIO : 0);
1170 static enum i40e_status_code
1171 iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
1173 enum i40e_status_code status = I40E_SUCCESS;
1174 struct i40e_arq_event_info event;
1175 struct i40e_hw *hw = &sc->hw;
1176 struct virtchnl_msg *v_msg;
	int error = 0, loop = 0;
	u32 reg;
1180 error = iavf_check_aq_errors(sc);
	if (error)
		return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR);
1184 event.buf_len = IXL_AQ_BUF_SZ;
1185 event.msg_buf = sc->aq_buffer;
1186 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1187 v_msg = (struct virtchnl_msg *)&event.desc;
1189 /* clean and process any events */
1191 status = i40e_clean_arq_element(hw, &event, pending);
1193 * Also covers normal case when i40e_clean_arq_element()
1194 * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK"
1198 iavf_vc_completion(sc, v_msg->v_opcode,
1199 v_msg->v_retval, event.msg_buf, event.msg_len);
1200 bzero(event.msg_buf, IXL_AQ_BUF_SZ);
1201 } while (*pending && (loop++ < IXL_ADM_LIMIT));
1203 /* Re-enable admin queue interrupt cause */
1204 reg = rd32(hw, I40E_VFINT_ICR0_ENA1);
1205 reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK;
1206 wr32(hw, I40E_VFINT_ICR0_ENA1, reg);
1212 iavf_if_update_admin_status(if_ctx_t ctx)
1214 struct iavf_sc *sc = iflib_get_softc(ctx);
1215 struct i40e_hw *hw = &sc->hw;
1218 iavf_process_adminq(sc, &pending);
1219 iavf_update_link_status(sc);
1222 * If there are still messages to process, reschedule.
1223 * Otherwise, re-enable the Admin Queue interrupt.
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		iavf_enable_adminq_irq(hw);
1232 iavf_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
1234 struct iavf_sc *sc = arg;
	if (ifma->ifma_addr->sa_family != AF_LINK)
		return (0);
1239 error = iavf_add_mac_filter(sc,
	    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
	    IXL_FILTER_MC);

	return (!error);
1247 iavf_if_multi_set(if_ctx_t ctx)
1249 struct iavf_sc *sc = iflib_get_softc(ctx);
1252 IOCTL_DEBUGOUT("iavf_if_multi_set: begin");
1254 mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
1255 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		/* Delete MC filters and enable multicast promisc instead */
1257 iavf_init_multi(sc);
1258 sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
		return;
	}
1263 /* If there aren't too many filters, delete existing MC filters */
1264 iavf_init_multi(sc);
1266 /* And (re-)install filters for all mcast addresses */
1267 mcnt = if_multi_apply(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc);
	if (mcnt > 0)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
1274 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1276 struct iavf_sc *sc = iflib_get_softc(ctx);
1277 struct ixl_vsi *vsi = &sc->vsi;
1279 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1280 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

1284 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1285 ETHER_VLAN_ENCAP_LEN;
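	/*
	 * Example: a standard 1500-byte MTU yields an isc_max_frame_size of
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
	 */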
1291 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1294 struct ifnet *ifp = iflib_get_ifp(ctx);
1296 struct iavf_sc *sc = iflib_get_softc(ctx);
1298 INIT_DBG_IF(ifp, "begin");
1300 iavf_update_link_status(sc);
1302 ifmr->ifm_status = IFM_AVALID;
1303 ifmr->ifm_active = IFM_ETHER;
	if (!sc->link_up)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;
1309 /* Hardware is always full-duplex */
1310 ifmr->ifm_active |= IFM_FDX;
1312 /* Based on the link speed reported by the PF over the AdminQ, choose a
1313 * PHY type to report. This isn't 100% correct since we don't really
1314 * know the underlying PHY type of the PF, but at least we can report
1315 * a valid link speed...
	switch (sc->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
	case VIRTCHNL_LINK_SPEED_25GB:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}
1339 INIT_DBG_IF(ifp, "end");
1343 iavf_if_media_change(if_ctx_t ctx)
1345 struct ifmedia *ifm = iflib_get_media(ctx);
1347 INIT_DEBUGOUT("ixl_media_change: begin");
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
1357 iavf_if_promisc_set(if_ctx_t ctx, int flags)
1359 struct iavf_sc *sc = iflib_get_softc(ctx);
1360 struct ifnet *ifp = iflib_get_ifp(ctx);
1362 sc->promisc_flags = 0;
1364 if (flags & IFF_ALLMULTI ||
1365 if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
1366 sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC;
1367 if (flags & IFF_PROMISC)
1368 sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC;
1370 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC);
1376 iavf_if_timer(if_ctx_t ctx, uint16_t qid)
1378 struct iavf_sc *sc = iflib_get_softc(ctx);
1379 struct i40e_hw *hw = &sc->hw;
1385 /* Check for when PF triggers a VF reset */
1386 val = rd32(hw, I40E_VFGEN_RSTAT) &
1387 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1388 if (val != VIRTCHNL_VFR_VFACTIVE
1389 && val != VIRTCHNL_VFR_COMPLETED) {
1390 iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
1394 /* Fire off the adminq task */
1395 iflib_admin_intr_deferred(ctx);
1398 iavf_request_stats(sc);
1402 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
1404 struct iavf_sc *sc = iflib_get_softc(ctx);
1405 struct ixl_vsi *vsi = &sc->vsi;
1406 struct iavf_vlan_filter *v;
	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;
1412 v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO);
1413 SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
	v->vlan = vtag;
	v->flags = IXL_FILTER_ADD;
1417 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
1421 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1423 struct iavf_sc *sc = iflib_get_softc(ctx);
1424 struct ixl_vsi *vsi = &sc->vsi;
	struct iavf_vlan_filter *v;
	int i = 0;
	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;
1431 SLIST_FOREACH(v, sc->vlan_filters, next) {
1432 if (v->vlan == vtag) {
			v->flags = IXL_FILTER_DEL;
			++i;
		}
	}
	if (i)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
1443 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1445 struct iavf_sc *sc = iflib_get_softc(ctx);
1446 struct ixl_vsi *vsi = &sc->vsi;
1447 if_t ifp = iflib_get_ifp(ctx);
	switch (cnt) {
	case IFCOUNTER_IPACKETS:
1451 return (vsi->ipackets);
1452 case IFCOUNTER_IERRORS:
1453 return (vsi->ierrors);
1454 case IFCOUNTER_OPACKETS:
1455 return (vsi->opackets);
1456 case IFCOUNTER_OERRORS:
1457 return (vsi->oerrors);
1458 case IFCOUNTER_COLLISIONS:
		/* Collisions are impossible in full-duplex 10G/40G Ethernet */
		return (0);
1461 case IFCOUNTER_IBYTES:
1462 return (vsi->ibytes);
1463 case IFCOUNTER_OBYTES:
1464 return (vsi->obytes);
1465 case IFCOUNTER_IMCASTS:
1466 return (vsi->imcasts);
1467 case IFCOUNTER_OMCASTS:
1468 return (vsi->omcasts);
1469 case IFCOUNTER_IQDROPS:
1470 return (vsi->iqdrops);
1471 case IFCOUNTER_OQDROPS:
1472 return (vsi->oqdrops);
1473 case IFCOUNTER_NOPROTO:
1474 return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
1482 iavf_free_pci_resources(struct iavf_sc *sc)
1484 struct ixl_vsi *vsi = &sc->vsi;
1485 struct ixl_rx_queue *rx_que = vsi->rx_queues;
1486 device_t dev = sc->dev;
	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

1492 /* Release all interrupts */
1493 iflib_irq_free(vsi->ctx, &vsi->irq);
1495 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1496 iflib_irq_free(vsi->ctx, &rx_que->que_irq);
early:
	if (sc->pci_mem != NULL)
1500 bus_release_resource(dev, SYS_RES_MEMORY,
1501 PCIR_BAR(0), sc->pci_mem);
1506 ** Requests a VF reset from the PF.
1508 ** Requires the VF's Admin Queue to be initialized.
1511 iavf_reset(struct iavf_sc *sc)
1513 struct i40e_hw *hw = &sc->hw;
1514 device_t dev = sc->dev;
1517 /* Ask the PF to reset us if we are initiating */
1518 if (sc->init_state != IAVF_RESET_PENDING)
1519 iavf_request_reset(sc);
1521 i40e_msec_pause(100);
1522 error = iavf_reset_complete(hw);
1524 device_printf(dev, "%s: VF reset failed\n",
1528 pci_enable_busmaster(dev);
1530 error = i40e_shutdown_adminq(hw);
1532 device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1537 error = i40e_init_adminq(hw);
1539 device_printf(dev, "%s: init_adminq failed: %d\n",
1544 iavf_enable_adminq_irq(hw);
1549 iavf_reset_complete(struct i40e_hw *hw)
1553 /* Wait up to ~10 seconds */
1554 for (int i = 0; i < 100; i++) {
1555 reg = rd32(hw, I40E_VFGEN_RSTAT) &
1556 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1558 if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg == VIRTCHNL_VFR_COMPLETED))
			return (0);
		i40e_msec_pause(100);
	}

	return (EBUSY);
1568 iavf_setup_interface(device_t dev, struct iavf_sc *sc)
1570 struct ixl_vsi *vsi = &sc->vsi;
1571 if_ctx_t ctx = vsi->ctx;
1572 struct ifnet *ifp = iflib_get_ifp(ctx);
1574 INIT_DBG_DEV(dev, "begin");
1576 vsi->shared->isc_max_frame_size =
1577 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1578 + ETHER_VLAN_ENCAP_LEN;
1579 #if __FreeBSD_version >= 1100000
1580 if_setbaudrate(ifp, IF_Gbps(40));
#else
	if_initbaudrate(ifp, IF_Gbps(40));
#endif
1585 ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1586 ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
1590 ** Get a new filter and add it to the mac filter list.
1592 static struct iavf_mac_filter *
1593 iavf_get_mac_filter(struct iavf_sc *sc)
1595 struct iavf_mac_filter *f;
1597 f = malloc(sizeof(struct iavf_mac_filter),
1598 M_IAVF, M_NOWAIT | M_ZERO);
	if (f != NULL)
		SLIST_INSERT_HEAD(sc->mac_filters, f, next);

	return (f);
1606 ** Find the filter with matching MAC address
1608 static struct iavf_mac_filter *
1609 iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr)
1611 struct iavf_mac_filter *f;
1614 SLIST_FOREACH(f, sc->mac_filters, next) {
1615 if (cmp_etheraddr(f->macaddr, macaddr)) {
1627 ** Admin Queue interrupt handler
1630 iavf_msix_adminq(void *arg)
1632 struct iavf_sc *sc = arg;
1633 struct i40e_hw *hw = &sc->hw;
1635 bool do_task = FALSE;
1639 reg = rd32(hw, I40E_VFINT_ICR01);
1641 * For masking off interrupt causes that need to be handled before
1642 * they can be re-enabled
1644 mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1646 /* Check on the cause */
1647 if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) {
1648 mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK;
1652 wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1653 iavf_enable_adminq_irq(hw);
	if (do_task)
		return (FILTER_SCHEDULE_THREAD);
	else
1658 return (FILTER_HANDLED);
1662 iavf_enable_intr(struct ixl_vsi *vsi)
1664 struct i40e_hw *hw = vsi->hw;
1665 struct ixl_rx_queue *que = vsi->rx_queues;
1667 iavf_enable_adminq_irq(hw);
1668 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1669 iavf_enable_queue_irq(hw, que->rxr.me);
1673 iavf_disable_intr(struct ixl_vsi *vsi)
1675 struct i40e_hw *hw = vsi->hw;
1676 struct ixl_rx_queue *que = vsi->rx_queues;
1678 for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1679 iavf_disable_queue_irq(hw, que->rxr.me);
1683 iavf_disable_adminq_irq(struct i40e_hw *hw)
1685 wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1686 wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
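	/* flush posted register writes */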
1688 rd32(hw, I40E_VFGEN_RSTAT);
1692 iavf_enable_adminq_irq(struct i40e_hw *hw)
1694 wr32(hw, I40E_VFINT_DYN_CTL01,
1695 I40E_VFINT_DYN_CTL01_INTENA_MASK |
1696 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1697 wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
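	/* flush posted register writes */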
1699 rd32(hw, I40E_VFGEN_RSTAT);
1703 iavf_enable_queue_irq(struct i40e_hw *hw, int id)
1707 reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1708 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1709 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1710 wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1714 iavf_disable_queue_irq(struct i40e_hw *hw, int id)
1716 wr32(hw, I40E_VFINT_DYN_CTLN1(id),
1717 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1718 rd32(hw, I40E_VFGEN_RSTAT);
1722 iavf_configure_tx_itr(struct iavf_sc *sc)
1724 struct i40e_hw *hw = &sc->hw;
1725 struct ixl_vsi *vsi = &sc->vsi;
1726 struct ixl_tx_queue *que = vsi->tx_queues;
1728 vsi->tx_itr_setting = sc->tx_itr;
1730 for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
1731 struct tx_ring *txr = &que->txr;
1733 wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
1734 vsi->tx_itr_setting);
1735 txr->itr = vsi->tx_itr_setting;
1736 txr->latency = IXL_AVE_LATENCY;
1741 iavf_configure_rx_itr(struct iavf_sc *sc)
1743 struct i40e_hw *hw = &sc->hw;
1744 struct ixl_vsi *vsi = &sc->vsi;
1745 struct ixl_rx_queue *que = vsi->rx_queues;
1747 vsi->rx_itr_setting = sc->rx_itr;
1749 for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
1750 struct rx_ring *rxr = &que->rxr;
1752 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
1753 vsi->rx_itr_setting);
1754 rxr->itr = vsi->rx_itr_setting;
1755 rxr->latency = IXL_AVE_LATENCY;
1760 * Get initial ITR values from tunable values.
1763 iavf_configure_itr(struct iavf_sc *sc)
1765 iavf_configure_tx_itr(sc);
1766 iavf_configure_rx_itr(sc);
** Provide an update to the RX queue
** interrupt moderation value.
1774 iavf_set_queue_rx_itr(struct ixl_rx_queue *que)
1776 struct ixl_vsi *vsi = que->vsi;
1777 struct i40e_hw *hw = vsi->hw;
1778 struct rx_ring *rxr = &que->rxr;
1780 /* Idle, do nothing */
	if (rxr->bytes == 0)
		return;
1784 /* Update the hardware if needed */
1785 if (rxr->itr != vsi->rx_itr_setting) {
1786 rxr->itr = vsi->rx_itr_setting;
1787 wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1788 que->rxr.me), rxr->itr);
1793 iavf_msix_que(void *arg)
1795 struct ixl_rx_queue *rx_que = arg;
1799 iavf_set_queue_rx_itr(rx_que);
1800 // iavf_set_queue_tx_itr(que);
1802 return (FILTER_SCHEDULE_THREAD);
1805 /*********************************************************************
1806 * Multicast Initialization
 * This routine is called by init to reset to a fresh state.
1810 **********************************************************************/
1812 iavf_init_multi(struct iavf_sc *sc)
	struct iavf_mac_filter *f;
	int mcnt = 0;
1817 /* First clear any multicast filters */
1818 SLIST_FOREACH(f, sc->mac_filters, next) {
1819 if ((f->flags & IXL_FILTER_USED)
1820 && (f->flags & IXL_FILTER_MC)) {
			f->flags |= IXL_FILTER_DEL;
			mcnt++;
		}
	}
	if (mcnt > 0)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
1835 iavf_update_link_status(struct iavf_sc *sc)
1837 struct ixl_vsi *vsi = &sc->vsi;
1841 if (vsi->link_active == FALSE) {
1842 vsi->link_active = TRUE;
1843 baudrate = ixl_max_vc_speed_to_value(sc->link_speed);
1844 iavf_dbg_info(sc, "baudrate: %lu\n", baudrate);
1845 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1847 } else { /* Link down */
1848 if (vsi->link_active == TRUE) {
1849 vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
		}
	}
1855 /*********************************************************************
 *  This routine disables all traffic on the adapter by disabling
 *  interrupts and requesting that the PF disable the VF's queues.
1860 **********************************************************************/
1863 iavf_stop(struct iavf_sc *sc)
1869 iavf_disable_intr(&sc->vsi);
1871 if (atomic_load_acq_32(&sc->queues_enabled))
1872 iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES);
1876 iavf_if_stop(if_ctx_t ctx)
	struct iavf_sc *sc = iflib_get_softc(ctx);

	iavf_stop(sc);
1884 iavf_config_rss_reg(struct iavf_sc *sc)
1886 struct i40e_hw *hw = &sc->hw;
1887 struct ixl_vsi *vsi = &sc->vsi;
1889 u64 set_hena = 0, hena;
1891 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
	u32 rss_hash_config;
#endif
1896 /* Don't set up RSS if using a single queue */
1897 if (vsi->num_rx_queues == 1) {
1898 wr32(hw, I40E_VFQF_HENA(0), 0);
		wr32(hw, I40E_VFQF_HENA(1), 0);
		return;
	}

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
1911 /* Fill out hash function seed */
1912 for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1913 wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
	/* Enable PCTYPES for RSS: */
#ifdef RSS
	rss_hash_config = rss_gethashconfig();
1918 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1919 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
1920 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1921 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
1922 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1923 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1924 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1925 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1926 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1927 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1928 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1929 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1930 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1931 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
1933 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
1935 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
1936 ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
1938 wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
1939 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
	/* Populate the LUT with the max number of queues in round-robin fashion */
1942 for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
		if (j == vsi->num_rx_queues)
			j = 0;
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_rx_queues).
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % vsi->num_rx_queues;
#else
		que_id = j;
#endif
1956 /* lut = 4-byte sliding window of 4 lut entries */
1957 lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
		/* On i = 3, we have 4 entries in lut; write to the register */
		if ((i & 3) == 3) {
			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
		}
1968 iavf_config_rss_pf(struct iavf_sc *sc)
1970 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY);
1972 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA);
1974 iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT);
1978 ** iavf_config_rss - setup RSS
1980 ** RSS keys and table are cleared on VF reset.
1983 iavf_config_rss(struct iavf_sc *sc)
1985 if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
1986 iavf_dbg_info(sc, "Setting up RSS using VF registers...");
1987 iavf_config_rss_reg(sc);
1988 } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1989 iavf_dbg_info(sc, "Setting up RSS using messages to PF...");
1990 iavf_config_rss_pf(sc);
1992 device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
1996 ** This routine adds new MAC filters to the sc's list;
** these are later added in hardware by sending a virtual
** channel message to the PF.
*/
2001 iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags)
2003 struct iavf_mac_filter *f;
2005 /* Does one already exist? */
2006 f = iavf_find_mac_filter(sc, macaddr);
2008 iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n",
2009 MAC_FORMAT_ARGS(macaddr));
2013 /* If not, get a new empty filter */
2014 f = iavf_get_mac_filter(sc);
2016 device_printf(sc->dev, "%s: no filters available!!\n",
2021 iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n",
2022 MAC_FORMAT_ARGS(macaddr));
2024 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	f->flags |= flags;

	return (0);
2031 ** Marks a MAC filter for deletion.
2034 iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
2036 struct iavf_mac_filter *f;
2038 f = iavf_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IXL_FILTER_DEL;
2047 * Re-uses the name from the PF driver.
2050 iavf_add_device_sysctls(struct iavf_sc *sc)
2052 struct ixl_vsi *vsi = &sc->vsi;
2053 device_t dev = sc->dev;
2055 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2056 struct sysctl_oid_list *ctx_list =
2057 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2058 struct sysctl_oid *debug_node;
2059 struct sysctl_oid_list *debug_list;
2061 SYSCTL_ADD_PROC(ctx, ctx_list,
2062 OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
2063 sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed");
2065 SYSCTL_ADD_PROC(ctx, ctx_list,
2066 OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
2067 sc, 0, iavf_sysctl_tx_itr, "I",
2068 "Immediately set TX ITR value for all queues");
2070 SYSCTL_ADD_PROC(ctx, ctx_list,
2071 OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
2072 sc, 0, iavf_sysctl_rx_itr, "I",
2073 "Immediately set RX ITR value for all queues");
2075 /* Add sysctls meant to print debug information, but don't list them
2076 * in "sysctl -a" output. */
2077 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2078 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
2079 debug_list = SYSCTL_CHILDREN(debug_node);
2081 SYSCTL_ADD_UINT(ctx, debug_list,
2082 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2083 &sc->hw.debug_mask, 0, "Shared code debug message level");
2085 SYSCTL_ADD_UINT(ctx, debug_list,
2086 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2087 &sc->dbg_mask, 0, "Non-shared code debug message level");
2089 SYSCTL_ADD_PROC(ctx, debug_list,
2090 OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
2091 sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List");
2093 SYSCTL_ADD_PROC(ctx, debug_list,
2094 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
2095 sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2097 SYSCTL_ADD_PROC(ctx, debug_list,
2098 OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
2099 sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");
2101 SYSCTL_ADD_PROC(ctx, debug_list,
2102 OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
2103 sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
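	/*
	 * Illustrative usage from userland (the unit number is hypothetical):
	 *
	 *   sysctl dev.iavf.0.debug.do_vf_reset=1
	 */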
2105 /* Add stats sysctls */
2106 ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
2107 ixl_add_queues_sysctls(dev, vsi);
2112 iavf_init_filters(struct iavf_sc *sc)
2114 sc->mac_filters = malloc(sizeof(struct mac_list),
2115 M_IAVF, M_WAITOK | M_ZERO);
2116 SLIST_INIT(sc->mac_filters);
2117 sc->vlan_filters = malloc(sizeof(struct vlan_list),
2118 M_IAVF, M_WAITOK | M_ZERO);
2119 SLIST_INIT(sc->vlan_filters);
2123 iavf_free_filters(struct iavf_sc *sc)
2125 struct iavf_mac_filter *f;
2126 struct iavf_vlan_filter *v;
2128 while (!SLIST_EMPTY(sc->mac_filters)) {
2129 f = SLIST_FIRST(sc->mac_filters);
		SLIST_REMOVE_HEAD(sc->mac_filters, next);
		free(f, M_IAVF);
	}
2133 free(sc->mac_filters, M_IAVF);
2134 while (!SLIST_EMPTY(sc->vlan_filters)) {
2135 v = SLIST_FIRST(sc->vlan_filters);
		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
		free(v, M_IAVF);
	}
2139 free(sc->vlan_filters, M_IAVF);
2143 iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
2157 switch (link_speed) {
2158 case VIRTCHNL_LINK_SPEED_100MB:
2161 case VIRTCHNL_LINK_SPEED_1GB:
2164 case VIRTCHNL_LINK_SPEED_10GB:
2167 case VIRTCHNL_LINK_SPEED_40GB:
2170 case VIRTCHNL_LINK_SPEED_20GB:
2173 case VIRTCHNL_LINK_SPEED_25GB:
2176 case VIRTCHNL_LINK_SPEED_UNKNOWN:
	return (speeds[index]);
2186 iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2188 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2191 error = sysctl_handle_string(oidp,
2192 iavf_vc_speed_to_string(sc->link_speed),
2198 * Sanity check and save off tunable values.
2201 iavf_save_tunables(struct iavf_sc *sc)
2203 device_t dev = sc->dev;
2205 /* Save tunable information */
2206 sc->dbg_mask = iavf_core_debug_mask;
2207 sc->hw.debug_mask = iavf_shared_debug_mask;
2208 sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback);
2210 if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) {
2211 device_printf(dev, "Invalid tx_itr value of %d set!\n",
2213 device_printf(dev, "tx_itr must be between %d and %d, "
2216 device_printf(dev, "Using default value of %d instead\n",
2218 sc->tx_itr = IXL_ITR_4K;
2220 sc->tx_itr = iavf_tx_itr;
2222 if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) {
2223 device_printf(dev, "Invalid rx_itr value of %d set!\n",
2225 device_printf(dev, "rx_itr must be between %d and %d, "
2228 device_printf(dev, "Using default value of %d instead\n",
2230 sc->rx_itr = IXL_ITR_8K;
2232 sc->rx_itr = iavf_rx_itr;
2236 * Used to set the Tx ITR value for all of the VF's queues.
2237 * Writes to the ITR registers immediately.
2240 iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
2242 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2243 device_t dev = sc->dev;
	int requested_tx_itr;
	int error = 0;
2247 requested_tx_itr = sc->tx_itr;
2248 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
2251 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
2253 "Invalid TX itr value; value must be between 0 and %d\n",
2258 sc->tx_itr = requested_tx_itr;
2259 iavf_configure_tx_itr(sc);
2265 * Used to set the Rx ITR value for all of the VF's queues.
2266 * Writes to the ITR registers immediately.
2269 iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
2271 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2272 device_t dev = sc->dev;
	int requested_rx_itr;
	int error = 0;
2276 requested_rx_itr = sc->rx_itr;
2277 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
2280 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
2282 "Invalid RX itr value; value must be between 0 and %d\n",
2287 sc->rx_itr = requested_rx_itr;
2288 iavf_configure_rx_itr(sc);
2294 iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
2296 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2297 struct iavf_mac_filter *f;
2298 struct iavf_vlan_filter *v;
2299 device_t dev = sc->dev;
	int ftl_len, ftl_counter = 0, error = 0;
	struct sbuf *buf;
2303 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2305 device_printf(dev, "Could not allocate sbuf for output.\n");
2309 sbuf_printf(buf, "\n");
2311 /* Print MAC filters */
2312 sbuf_printf(buf, "MAC Filters:\n");
	ftl_len = 0;
	SLIST_FOREACH(f, sc->mac_filters, next)
		ftl_len++;
	if (ftl_len < 1)
		sbuf_printf(buf, "(none)\n");
	SLIST_FOREACH(f, sc->mac_filters, next) {
		sbuf_printf(buf,
		    MAC_FORMAT ", flags %#06x\n",
		    MAC_FORMAT_ARGS(f->macaddr), f->flags);
	}
2326 /* Print VLAN filters */
2327 sbuf_printf(buf, "VLAN Filters:\n");
	ftl_len = 0;
	SLIST_FOREACH(v, sc->vlan_filters, next)
		ftl_len++;
	if (ftl_len < 1)
		sbuf_printf(buf, "(none)");
	SLIST_FOREACH(v, sc->vlan_filters, next) {
		sbuf_printf(buf, "%d, flags %#06x",
		    v->vlan, v->flags);
2338 /* don't print '\n' for last entry */
2339 if (++ftl_counter != ftl_len)
2340 sbuf_printf(buf, "\n");
2344 error = sbuf_finish(buf);
2346 device_printf(dev, "Error finishing sbuf: %d\n", error);
 * Print out mapping of TX queue indexes and RX queue indexes
 * to MSI-X vectors.
2357 iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
2359 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2360 struct ixl_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;
2365 struct ixl_rx_queue *rx_que = vsi->rx_queues;
2366 struct ixl_tx_queue *tx_que = vsi->tx_queues;
2368 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2370 device_printf(dev, "Could not allocate sbuf for output.\n");
2374 sbuf_cat(buf, "\n");
2375 for (int i = 0; i < vsi->num_rx_queues; i++) {
2376 rx_que = &vsi->rx_queues[i];
2377 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
2379 for (int i = 0; i < vsi->num_tx_queues; i++) {
2380 tx_que = &vsi->tx_queues[i];
2381 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
2384 error = sbuf_finish(buf);
2386 device_printf(dev, "Error finishing sbuf: %d\n", error);
2392 #define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
2394 iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
2396 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2397 int do_reset = 0, error = 0;
2399 error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
2403 if (do_reset == 1) {
2405 if (CTX_ACTIVE(sc->vsi.ctx))
2406 iflib_request_reset(sc->vsi.ctx);
2413 iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
2415 struct iavf_sc *sc = (struct iavf_sc *)arg1;
2416 device_t dev = sc->dev;
2417 int do_reset = 0, error = 0;
2419 error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
2423 if (do_reset == 1) {
2424 if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
2425 device_printf(dev, "PCIE FLR failed\n");
2428 else if (CTX_ACTIVE(sc->vsi.ctx))
2429 iflib_request_reset(sc->vsi.ctx);