1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
34 #include "ixl_pf_iov.h"
/* Private functions */

/* VSI queue-mapping and interrupt teardown helpers */
static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);

static int ixl_vc_opcode_level(uint16_t opcode);

static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);

/* VF VSI lifecycle (allocate/setup/map/release) and VF reset */
static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);

/* PF -> VF virtual-channel message transmission */
static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);

/* Handlers for virtchnl opcodes received from a VF */
static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue);
static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
static int ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);

static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
/*
 * Register the SR-IOV config schemas for this PF with pci_iov(4) so the
 * host can create VFs with per-VF "mac-addr", "mac-anti-spoof",
 * "allow-set-mac", "allow-promisc" and "num-queues" parameters.
 *
 * TODO: Move pieces of this into iflib and call the rest in a handler?
 * e.g. ixl_if_iov_set_schema
 * It's odd to do pci_iov_detach() there while doing pci_iov_attach()
 *
 * NOTE(review): this extraction is missing lines from the original file
 * (the return-type line, braces, the iov_error declaration and the error
 * device_printf() call) — restore from upstream before building.
 */
ixl_initialize_sriov(struct ixl_pf *pf)
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	/* No default MAC: 0 flags, NULL default value */
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	/* Default queue count: one queue per VF MSI-X vector (minus the
	 * admin/"other" vector), capped at IAVF_MAX_QUEUES, at least 1. */
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
	    IOV_SCHEMA_HASDEFAULT,
	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES)));

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (iov_error != 0) {
	    "Failed to initialize SR-IOV (error=%d)\n",
	device_printf(dev, "SR-IOV ready\n");

	/* Default virtual-channel message debug level */
	pf->vc_debug_lvl = 1;
/*
 * Allocate the VSI for a VF.
 *
 * Builds a VF-type VSI context (switch, security, VLAN and queue-map
 * sections), adds it via the admin queue, records the resulting
 * SEID/VSI number and queue counts in vf->vsi, and disables the VSI
 * BW limit. Returns 0 or an errno translated from the AQ error.
 *
 * NOTE(review): extraction is missing lines here (return-type line,
 * braces, hw/dev/i declarations, final return) — restore from upstream.
 */
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
	struct i40e_vsi_context vsi_ctx;
	enum i40e_status_code code;

	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	/* Switch section: optionally allow VEB loopback for VF-to-VF traffic */
	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	if (pf->enable_vf_loopback)
		vsi_ctx.info.switch_id =
		    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	/* Security section: MAC anti-spoof check if requested for this VF */
	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	/* VLAN section: accept all tags, leave them in the packet */
	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);

	/* XXX: Only scattered allocation is supported for VFs right now */
	for (i = 0; i < vf->qtag.num_active; i++)
		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	/* TC0 gets all queues; fls() encodes the power-of-2 queue count */
	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	vf->vsi.num_rx_queues = vf->qtag.num_active;
	vf->vsi.num_tx_queues = vf->qtag.num_active;

	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	/* Disable the per-VSI bandwidth limit (credit = 0) */
	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/*
 * Allocate and finish configuring a VF's VSI: mark it as a VF VSI,
 * allocate it in hardware, enable broadcast reception, and re-apply
 * any MAC/VLAN filters that were previously configured.
 *
 * NOTE(review): extraction is missing lines (braces, hw/error
 * declarations, error-check branches, final return) — restore upstream.
 */
ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
	vf->vsi.flags |= IXL_FLAGS_IS_VF;

	error = ixl_vf_alloc_vsi(pf, vf);
	vf->vsi.dev = pf->dev;

	ixl_init_filters(&vf->vsi);
	/* Let VF receive broadcast Ethernet frames */
	error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
		device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
	/* Re-add VF's MAC/VLAN filters to its VSI */
	ixl_reconfigure_filters(&vf->vsi);
/*
 * Write one queue-index entry (val) for VSI-relative queue qnum into the
 * VSILAN_QTABLE register for this VF's VSI, via a read-modify-write.
 *
 * Two queues are mapped in a single register, so we have to do some
 * gymnastics to convert the queue number into a register index and
 * a bit shift within that register.
 *
 * NOTE(review): extraction is missing lines (the uint32_t val parameter
 * line, braces, qtable/index/shift declarations and the
 * "index = qnum / 2" computation) — restore from upstream.
 */
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;

	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
	qtable |= val << shift;
	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/*
 * Program the hardware queue mappings for a VF: enable the QTABLE-based
 * (non-contiguous) mapping for its VSI, enable LAN traffic for the VF,
 * map each VF-relative queue index to its PF queue, and mark the rest
 * of the queue slots unused.
 */
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	    I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable LAN traffic on this VF */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Program index of each VF queue into PF queue space
	 * (This is only needed if QTABLE is enabled) */
	for (i = 0; i < vf->vsi.num_tx_queues; i++) {
		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	/* Mark remaining VF queue slots invalid */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
		    I40E_VPLAN_QTABLE_QINDEX_MASK);

	/* Map queues allocated to VF to its VSI;
	 * This mapping matches the VF-wide mapping since the VF
	 * is only given a single VSI */
	for (i = 0; i < vf->vsi.num_tx_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/*
 * Delete a VF's VSI switch element via the admin queue.
 *
 * NOTE(review): extraction is missing lines (braces, hw declaration and,
 * presumably, a guard for vsi->seid == 0 — confirm against upstream).
 */
ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
	i40e_aq_delete_element(hw, vsi->seid, NULL);
/*
 * Disable a VF queue interrupt by writing only CLEARPBA to its dynamic
 * control register (INTENA left clear disables the interrupt).
 */
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/*
 * Unhook a VF interrupt vector by setting its linked-list head register
 * to the all-ones FIRSTQ index/type, i.e. "end of list" (no queues).
 */
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/*
 * Tear down a VF's hardware resources: release its VSI, disable every
 * queue interrupt, unregister every interrupt vector, and zero the
 * recorded queue counts. Vector/interrupt index 0 uses dedicated
 * registers, hence the separate calls before each loop.
 */
ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
	uint32_t vfint_reg, vpint_reg;

	ixl_vf_vsi_release(pf, &vf->vsi);

	/* Index 0 has a special register. */
	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
		ixl_vf_disable_queue_intr(hw, vfint_reg);

	/* Index 0 has a special register. */
	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
		ixl_vf_unregister_intr(hw, vpint_reg);

	vf->vsi.num_tx_queues = 0;
	vf->vsi.num_rx_queues = 0;
/*
 * Poll (up to IXL_VF_RESET_TIMEOUT iterations) for the VF's pending PCIe
 * transactions to drain, by selecting the VF's device-status register via
 * PF_PCI_CIAA and reading the result back through PF_PCI_CIAD.
 *
 * NOTE(review): extraction is missing lines (braces, hw/i/ciad
 * declarations, the loop's delay and the success/timeout returns).
 */
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
	uint16_t global_vf_num;

	/* VF numbering is global across PFs on this device */
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;

	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
	    (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		ciad = rd32(hw, I40E_PF_PCI_CIAD);
		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/*
 * Reset a VF: assert the software-reset trigger bit in VPGEN_VFRTRIG,
 * then run the full reinit sequence (which also clears the trigger).
 */
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
	ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	ixl_reinit_vf(pf, vf);

	ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
/*
 * Complete a VF reset: wait for PCIe activity to stop and for the
 * hardware to report reset done, clear the reset trigger, tear down and
 * rebuild the VF's VSI and queue mappings, and finally mark the VF
 * active in VFGEN_RSTAT1 so its driver can proceed.
 */
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
	uint32_t vfrstat, vfrtrig;

	error = ixl_flush_pcie(pf, vf);
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",

	/* Poll for the hardware "VF reset done" indication */
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	/* Tell the VF its reset has completed */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);

	/* De-assert the software reset trigger */
	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
	ixl_pf_qmgr_clear_queue_flags(&vf->qtag);

	/* Rebuild the VF's VSI and hardware queue mappings from scratch */
	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
/*
 * Return the debug verbosity level for a virtchnl opcode; GET_STATS is
 * cased separately (it is sent frequently, presumably to log it at a
 * higher/noisier level — confirm against upstream, the switch bodies
 * are missing from this extraction).
 */
ixl_vc_opcode_level(uint16_t opcode)
	case VIRTCHNL_OP_GET_STATS:
/*
 * Send a virtchnl message (opcode + status + optional payload) to a VF
 * over the admin queue, logging it at the opcode's debug level.
 */
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, void *msg, uint16_t len)
	/* Admin queue addresses VFs by their device-global number */
	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num);

	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* Acknowledge a VF request: send the opcode back with SUCCESS, no payload. */
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/*
 * Reject a VF request: log the failing opcode/status together with the
 * PF source location (file:line, supplied by the i40e_send_vf_nack
 * wrapper macro), then send the NACK with no payload.
 */
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
	    status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/*
 * Handle VIRTCHNL_OP_VERSION: negotiate the virtchnl API version with the
 * VF and reply with the version the PF will speak.
 */
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_version_info *recv_vf_version;
	device_t dev = pf->dev;

	recv_vf_version = (struct virtchnl_version_info *)msg;

	/* VFs running the 1.0 API expect to get 1.0 back */
	if (VF_IS_V10(recv_vf_version)) {
		vf->version.major = 1;
		vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
		/* NOTE(review): else-branch brace missing in extraction */
		vf->version.major = VIRTCHNL_VERSION_MAJOR;
		vf->version.minor = VIRTCHNL_VERSION_MINOR;

		/* Log (but tolerate) a minor-version mismatch */
		if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
		    (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
		    "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
		    __func__, vf->vf_num,
		    recv_vf_version->major, recv_vf_version->minor,
		    VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
	    &vf->version, sizeof(vf->version));
/* Handle VIRTCHNL_OP_RESET_VF: perform the reset; the protocol defines
 * no reply for this opcode. */
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	ixl_reset_vf(pf, vf);

	/* No response to a reset message. */
/*
 * Handle VIRTCHNL_OP_GET_VF_RESOURCES: report the VF's capability flags,
 * queue/vector counts, RSS sizes and its single SR-IOV VSI (including its
 * assigned MAC address).
 */
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_vf_resource reply;

	bzero(&reply, sizeof(reply));

	/* 1.0-API VFs sent no capability request payload; use fixed caps */
	if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_REG |
		    VIRTCHNL_VF_OFFLOAD_VLAN;
		/* NOTE(review): else-branch line missing in extraction */
		/* Force VF RSS setup by PF in 1.1+ VFs */
		reply.vf_cap_flags = *(u32 *)msg & (
		    VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_PF |
		    VIRTCHNL_VF_OFFLOAD_VLAN);

	reply.num_queue_pairs = vf->vsi.num_tx_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	/* X710/XL710 RSS: 52-byte hash key, 64-entry VF lookup table */
	reply.rss_key_size = 52;
	reply.rss_lut_size = 64;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
/*
 * Program one VF TX queue: translate the VF-relative queue id to the PF
 * (global) queue number, write the HMC TX queue context from the VF's
 * virtchnl parameters, associate the queue with the VF via QTX_CTL, and
 * mark it configured in the queue manager. Returns 0 on success.
 */
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_txq_info *info)
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;

	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);

	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)

	/* Ring base is programmed in 128-byte units */
	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)

	/* Bind this PF queue to the VF for doorbell/ownership purposes */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
/*
 * Program one VF RX queue: validate buffer/frame sizes, optionally set
 * up header split, then write the HMC RX queue context for the VF's
 * PF-global queue and mark it configured. Returns 0 on success.
 */
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_rxq_info *info)
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	bzero(&rxq, sizeof(rxq));

	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
	    vf->vf_num, global_queue_num, info->queue_id);

	/* Reject out-of-range data buffer and packet sizes */
	if (info->databuffer_size > IXL_VF_MAX_BUFFER)

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)

	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)

		/* Only allow splitting at the protocol boundaries the HW supports */
		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)

	/* Ring base in 128-byte units; buffer size in 128-byte units */
	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.rxmax = info->max_pkt_size;
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the pair count and VSI
 * id, check every queue pair's ids, then program each TX and RX queue.
 * Any validation or programming failure NACKs the whole request.
 */
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_vsi_queue_config_info *info;
	struct virtchnl_queue_pair_info *pair;

	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,

	if (info->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,

	for (i = 0; i < info->num_queue_pairs; i++) {
		pair = &info->qpair[i];

		/* TX and RX of a pair must target the same, in-range queue
		 * on this VF's VSI */
		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
		    pair->txq.queue_id != pair->rxq.queue_id ||
		    pair->txq.queue_id >= vf->vsi.num_tx_queues) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);

		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);

		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/*
 * Append one queue to the interrupt linked list being built for a VF
 * vector: write the queue's QINT_[RT]QCTL so it points at the previously
 * linked queue (*last_type/*last_queue), then record this queue as the
 * new list head candidate via the out-parameters. The RQCTL/TQCTL field
 * layouts match, so the RQCTL shift/mask macros are used for both.
 */
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
	uint32_t offset, qctl;

	if (cur_type == I40E_QUEUE_TYPE_RX) {
		offset = I40E_QINT_RQCTL(cur_queue);
		itr_indx = vector->rxitr_idx;
		/* NOTE(review): else-branch brace missing in extraction */
		offset = I40E_QINT_TQCTL(cur_queue);
		itr_indx = vector->txitr_idx;

	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

	wr32(&pf->hw, offset, qctl);

	*last_type = cur_type;
	*last_queue = cur_queue;
/*
 * Build the interrupt linked list for one VF vector from its rx/tx queue
 * bitmaps, then point the vector's LNKLST register at the head of the
 * list. Vector 0 uses the dedicated LNKLST0 register.
 */
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct virtchnl_vector_map *vector)
	enum i40e_queue_type type, last_type;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	/* Start from the end-of-list sentinel; list is built backwards */
	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says to optimize performance, RX queues and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both maps together, one bit at a time.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		qindex = ffs(txq_map) - 1;
		type = I40E_QUEUE_TYPE_TX;
		cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
		ixl_vf_set_qctl(pf, vector, type, cur_queue,
		    &last_type, &last_queue);
		txq_map &= ~(1 << qindex);

		qindex = ffs(rxq_map) - 1;
		type = I40E_QUEUE_TYPE_RX;
		cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
		ixl_vf_set_qctl(pf, vector, type, cur_queue,
		    &last_type, &last_queue);
		rxq_map &= ~(1 << qindex);

	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
		/* NOTE(review): else line missing in extraction */
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/*
 * Handle VIRTCHNL_OP_CONFIG_IRQ_MAP: validate every vector map (vector
 * id in range, correct VSI, queue bitmaps within the VF's allocation,
 * ITR indices valid), then program each vector's interrupt linked list.
 */
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_irq_map_info *map;
	struct virtchnl_vector_map *vector;
	int i, largest_txq, largest_rxq;

	for (i = 0; i < map->num_vectors; i++) {
		vector = &map->vecmap[i];

		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
		    vector->vsi_id != vf->vsi.vsi_num) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);

		/* Highest set bit of each map must be a queue the VF owns */
		if (vector->rxq_map != 0) {
			largest_rxq = fls(vector->rxq_map) - 1;
			if (largest_rxq >= vf->vsi.num_rx_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,

		if (vector->txq_map != 0) {
			largest_txq = fls(vector->txq_map) - 1;
			if (largest_txq >= vf->vsi.num_tx_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,

		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP,

		ixl_vf_config_vector(pf, vf, vector);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
/*
 * Handle VIRTCHNL_OP_ENABLE_QUEUES: enable the TX then RX rings selected
 * in the VF's queue bitmaps, skipping unconfigured queues and warning on
 * out-of-range or already-enabled ones. NACKs if any ring fails to start.
 */
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_queue_select *select;

	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,

	/* Enable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_tx_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",

			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
				ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",

			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
				/* NOTE(review): error branch missing in extraction */
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);

	/* Enable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_rx_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",

			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
				ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
				/* NOTE(review): error branch missing in extraction */
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);

		/* NOTE(review): error-exit NACK path below */
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
/*
 * Handle VIRTCHNL_OP_DISABLE_QUEUES: disable the TX then RX rings
 * selected in the VF's queue bitmaps, mirroring the enable path.
 * NACKs if any ring fails to stop.
 */
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
	struct virtchnl_queue_select *select;

	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,

	/* Disable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_tx_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",

			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
				ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",

			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
				/* NOTE(review): error branch missing in extraction */
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);

	/* Disable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_rx_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",

			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
				ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",

			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
				/* NOTE(review): error branch missing in extraction */
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);

		/* NOTE(review): error-exit NACK path below */
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
/*
 * Validate a MAC address a VF wants to filter on. Zero and broadcast
 * addresses are always rejected; a VF without SET_MAC capability may
 * only use multicast addresses or its own assigned unicast MAC.
 * Returns 0 if acceptable, non-zero otherwise (error-return lines are
 * missing from this extraction).
 */
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
	if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))

	/*
	 * If the VF is not allowed to change its MAC address, don't let it
	 * set a MAC filter for an address that is not a multicast address and
	 * is not its assigned MAC.
	 */
	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
	    !(ETHER_IS_MULTICAST(addr) || !ixl_ether_is_equal(addr, vf->mac)))
/*
 * Handle VIRTCHNL_OP_ADD_ETH_ADDR: validate the VSI id and every
 * requested address, then add each as a VLAN-any filter on the VF's VSI.
 */
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;

	if (addr_list->vsi_id != vsi->vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,

	/* Validate the full list before adding anything */
	for (i = 0; i < addr_list->num_elements; i++) {
		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
/*
 * Handle VIRTCHNL_OP_DEL_ETH_ADDR: validate the VSI id, reject zero and
 * broadcast addresses, then remove each address's VLAN-any filter.
 */
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;

	if (addr_list->vsi_id != vsi->vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,

	/* Validate the full list before removing anything */
	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		if (ETHER_IS_ZERO(addr->addr) || ETHER_IS_BROADCAST(addr->addr)) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
1100 static enum i40e_status_code
1101 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1103 struct i40e_vsi_context vsi_ctx;
1105 vsi_ctx.seid = vf->vsi.seid;
1107 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1108 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1109 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1110 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1111 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/*
 * Handle VIRTCHNL_OP_ADD_VLAN: validate the VSI id, the VF's VLAN
 * capability and every VLAN id, enable VLAN stripping on the VSI, then
 * add a filter for the VF's MAC on each requested VLAN.
 */
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_vlan_filter_list *filter_list;
	enum i40e_status_code code;

	if (filter_list->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,

	/* VLAN ids must fit in the 12-bit VID field */
	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,

	code = ixl_vf_enable_vlan_strip(pf, vf);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
1157 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1160 struct virtchnl_vlan_filter_list *filter_list;
1165 if (filter_list->vsi_id != vf->vsi.vsi_num) {
1166 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1171 for (i = 0; i < filter_list->num_elements; i++) {
1172 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1173 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1179 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1180 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1185 for (i = 0; i < filter_list->num_elements; i++)
1186 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1188 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
/*
 * Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: if the VF lacks promisc
 * capability, ACK without doing anything (matching the Linux PF driver);
 * otherwise validate the VSI id and set unicast/multicast promiscuous
 * state on the VF's VSI via the admin queue.
 */
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
	struct virtchnl_promisc_info *info;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code code;

	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		/*
		 * Do the same thing as the Linux PF driver -- lie to the VF
		 * and ACK without changing anything.
		 */
		ixl_send_vf_ack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);

	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);

	code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);

	code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
/*
 * Handle VIRTCHNL_OP_GET_STATS from a VF.
 *
 * Validates the VSI id, refreshes the VF VSI's ethernet statistics from
 * hardware, and sends the eth_stats structure back to the VF.
 */
1241 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1244 struct virtchnl_queue_select *queue;
/* The VSI id in the message must be the VSI assigned to this VF. */
1247 if (queue->vsi_id != vf->vsi.vsi_num) {
1248 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
/* Pull fresh counters from HW before replying. */
1253 ixl_update_eth_stats(&vf->vsi);
1255 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
1256 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
/*
 * Handle VIRTCHNL_OP_CONFIG_RSS_KEY from a VF.
 *
 * Validates the key length (max 52 bytes) and VSI id, then programs the
 * RSS hash key using the MAC-type-specific mechanism: X722 uses the
 * set-RSS-key admin queue command (first 40 bytes go in the standard key,
 * the remainder in the extended hash key); other MACs write the key
 * directly into the per-VF VFQF_HKEY1 registers, 4 bytes at a time.
 */
1260 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1264 struct virtchnl_rss_key *key;
1265 struct i40e_aqc_get_set_rss_key_data key_data;
1266 enum i40e_status_code status;
/* 52 = 40-byte standard RSS key + 12-byte extended hash key. */
1272 if (key->key_len > 52) {
1273 device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1274 vf->vf_num, key->key_len, 52);
1275 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1280 if (key->vsi_id != vf->vsi.vsi_num) {
1281 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1282 vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1283 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1288 /* Fill out hash using MAC-dependent method */
1289 if (hw->mac.type == I40E_MAC_X722) {
1290 bzero(&key_data, sizeof(key_data));
/* Short keys fit entirely in the standard key field. */
1291 if (key->key_len <= 40)
1292 bcopy(key->key, key_data.standard_rss_key, key->key_len);
/* Longer keys spill bytes 40..key_len-1 into the extended field. */
1294 bcopy(key->key, key_data.standard_rss_key, 40);
1295 bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1297 status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1299 device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1300 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1301 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1302 I40E_ERR_ADMIN_QUEUE_ERROR);
/* Non-X722: write the key into the per-VF HKEY registers word by word. */
1306 for (int i = 0; i < (key->key_len / 4); i++)
1307 i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1310 DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1311 vf->vf_num, key->key[0]);
1313 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
/*
 * Handle VIRTCHNL_OP_CONFIG_RSS_LUT from a VF.
 *
 * Validates the LUT size (max 64 entries) and VSI id, then programs the
 * RSS lookup table: X722 via the set-RSS-LUT admin queue command, other
 * MACs by writing the per-VF VFQF_HLUT1 registers 4 entries at a time.
 */
1317 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1321 struct virtchnl_rss_lut *lut;
1322 enum i40e_status_code status;
/* 64 entries is the per-VF LUT size limit enforced here. */
1328 if (lut->lut_entries > 64) {
1329 device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1330 vf->vf_num, lut->lut_entries, 64);
1331 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1336 if (lut->vsi_id != vf->vsi.vsi_num) {
1337 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1338 vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1339 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1344 /* Fill out LUT using MAC-dependent method */
1345 if (hw->mac.type == I40E_MAC_X722) {
1346 status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1348 device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1349 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1350 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1351 I40E_ERR_ADMIN_QUEUE_ERROR);
/* Non-X722: write the LUT into the per-VF HLUT registers word by word. */
1355 for (int i = 0; i < (lut->lut_entries / 4); i++)
1356 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1359 DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1360 vf->vf_num, lut->lut[0], lut->lut_entries);
1362 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
/*
 * Handle VIRTCHNL_OP_SET_RSS_HENA from a VF.
 *
 * Programs the 64-bit hash-enable (HENA) value into the VF's two 32-bit
 * VFQF_HENA1 registers (low word in index 0, high word in index 1),
 * then acks the VF.
 */
1366 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1370 struct virtchnl_rss_hena *hena;
1376 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1377 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1379 DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1380 vf->vf_num, hena->hena);
1382 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
/*
 * Send a VIRTCHNL_OP_EVENT link-change notification to one VF, carrying
 * the PF VSI's current link status and the PHY's link speed translated
 * into virtchnl speed encoding.
 */
1386 ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
1388 struct virtchnl_pf_event event;
1392 event.event = VIRTCHNL_EVENT_LINK_CHANGE;
1393 event.severity = PF_EVENT_SEVERITY_INFO;
/* Link status/speed are taken from the PF VSI — VFs share the PF's link. */
1394 event.event_data.link_event.link_status = pf->vsi.link_active;
1395 event.event_data.link_event.link_speed =
1396 i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);
1398 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
/* Notify every configured VF of the current PF link state. */
1403 ixl_broadcast_link_state(struct ixl_pf *pf)
1407 for (i = 0; i < pf->num_vfs; i++)
1408 ixl_notify_vf_link_state(pf, &pf->vfs[i]);
/*
 * Dispatch one admin-queue event containing a virtchnl message from a VF.
 *
 * Decodes the VF number and opcode from the AQ descriptor, validates that
 * the VF index and message contents are sane (via
 * virtchnl_vc_validate_vf_msg), drops stray messages from disabled VFs,
 * then dispatches to the per-opcode handler.  Unknown or superseded
 * opcodes are nacked with I40E_ERR_NOT_IMPLEMENTED.
 */
1412 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1414 device_t dev = pf->dev;
1416 uint16_t vf_num, msg_size;
/* desc.retval holds the absolute VF id; convert to a PF-relative index. */
1421 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1422 opcode = le32toh(event->desc.cookie_high);
1424 if (vf_num >= pf->num_vfs) {
1425 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1429 vf = &pf->vfs[vf_num];
1430 msg = event->msg_buf;
1431 msg_size = event->msg_len;
1433 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1434 "Got msg %s(%d) from%sVF-%d of size %d\n",
1435 ixl_vc_opcode_str(opcode), opcode,
1436 (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1439 /* Perform basic checks on the msg */
1440 err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
1442 device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
1443 __func__, vf->vf_num, opcode, msg_size, err);
1444 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
1448 /* This must be a stray msg from a previously destroyed VF. */
1449 if (!(vf->vf_flags & VF_FLAG_ENABLED))
/* Per-opcode dispatch. */
1453 case VIRTCHNL_OP_VERSION:
1454 ixl_vf_version_msg(pf, vf, msg, msg_size);
1456 case VIRTCHNL_OP_RESET_VF:
1457 ixl_vf_reset_msg(pf, vf, msg, msg_size);
1459 case VIRTCHNL_OP_GET_VF_RESOURCES:
1460 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1461 /* Notify VF of link state after it obtains queues, as this is
1462 * the last thing it will do as part of initialization
1464 ixl_notify_vf_link_state(pf, vf);
1466 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1467 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1469 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1470 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1472 case VIRTCHNL_OP_ENABLE_QUEUES:
1473 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1474 /* Notify VF of link state after it obtains queues, as this is
1475 * the last thing it will do as part of initialization
1477 ixl_notify_vf_link_state(pf, vf);
1479 case VIRTCHNL_OP_DISABLE_QUEUES:
1480 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1482 case VIRTCHNL_OP_ADD_ETH_ADDR:
1483 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1485 case VIRTCHNL_OP_DEL_ETH_ADDR:
1486 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1488 case VIRTCHNL_OP_ADD_VLAN:
1489 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1491 case VIRTCHNL_OP_DEL_VLAN:
1492 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1494 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1495 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1497 case VIRTCHNL_OP_GET_STATS:
1498 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1500 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1501 ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1503 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1504 ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1506 case VIRTCHNL_OP_SET_RSS_HENA:
1507 ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1510 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1511 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1512 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1514 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1519 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
1521 ixl_handle_vflr(struct ixl_pf *pf)
1525 uint16_t global_vf_num;
1526 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1531 ixl_dbg_iov(pf, "%s: begin\n", __func__);
1533 /* Re-enable VFLR interrupt cause so driver doesn't miss a
1534 * reset interrupt for another VF */
1535 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1536 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1537 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
/*
 * Scan every enabled VF's bit in the GLGEN_VFLRSTAT registers; each set
 * bit marks a VF that performed an FLR.  Clear the bit (write-1-to-clear)
 * and reinitialize that VF.
 */
1540 for (i = 0; i < pf->num_vfs; i++) {
/* GLGEN_VFLRSTAT is indexed by the absolute (global) VF number. */
1541 global_vf_num = hw->func_caps.vf_base_id + i;
1544 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1547 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1548 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1549 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1550 if (vflrstat & vflrstat_mask) {
1551 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1554 ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
1555 ixl_reinit_vf(pf, vf);
1556 ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
/*
 * Translate an i40e admin-queue error code into the closest errno(2)
 * value, so AQ failures can be reported through standard kernel paths.
 */
1563 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1567 case I40E_AQ_RC_EPERM:
1569 case I40E_AQ_RC_ENOENT:
1571 case I40E_AQ_RC_ESRCH:
1573 case I40E_AQ_RC_EINTR:
1575 case I40E_AQ_RC_EIO:
1577 case I40E_AQ_RC_ENXIO:
1579 case I40E_AQ_RC_E2BIG:
1581 case I40E_AQ_RC_EAGAIN:
1583 case I40E_AQ_RC_ENOMEM:
1585 case I40E_AQ_RC_EACCES:
1587 case I40E_AQ_RC_EFAULT:
1589 case I40E_AQ_RC_EBUSY:
1591 case I40E_AQ_RC_EEXIST:
1593 case I40E_AQ_RC_EINVAL:
1595 case I40E_AQ_RC_ENOTTY:
1597 case I40E_AQ_RC_ENOSPC:
1599 case I40E_AQ_RC_ENOSYS:
1601 case I40E_AQ_RC_ERANGE:
1603 case I40E_AQ_RC_EFLUSHED:
1604 return (EINVAL); /* No exact equivalent in errno.h */
1605 case I40E_AQ_RC_BAD_ADDR:
1607 case I40E_AQ_RC_EMODE:
1609 case I40E_AQ_RC_EFBIG:
/*
 * Enable or disable local loopback (ALLOW_LB) on the PF's main VSI via an
 * update-VSI-params AQ command.  Used when VEB mode is active so PF and
 * VF traffic can be switched locally.  If a VEB exists, the VSI's uplink
 * is pointed at it.  AQ failures are logged.
 */
1617 ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
1619 struct i40e_hw *hw = &pf->hw;
1620 device_t dev = pf->dev;
1621 struct ixl_vsi *vsi = &pf->vsi;
1622 struct i40e_vsi_context ctxt;
1625 memset(&ctxt, 0, sizeof(ctxt));
1627 ctxt.seid = vsi->seid;
/* If a VEB has been created, the PF VSI uplinks to it. */
1628 if (pf->veb_seid != 0)
1629 ctxt.uplink_seid = pf->veb_seid;
1630 ctxt.pf_num = hw->pf_id;
1631 ctxt.connection_type = IXL_VSI_DATA_PORT;
/* Only the switch section of the VSI context is being updated. */
1633 ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
1634 ctxt.info.switch_id = (enable) ?
1635 htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;
1637 /* error is set to 0 on success */
1638 error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1640 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1641 " aq_error %d\n", error, hw->aq.asq_last_status);
/*
 * iflib IFDI_IOV_INIT handler: bring up SR-IOV support for num_vfs VFs.
 *
 * Allocates the VF tracking array, creates a VEB under the PF VSI so VF
 * VSIs can attach to it, optionally enables PF-VSI loopback, and restores
 * the PF's MAC filters (adding a VEB reinstates default HW filters that
 * must be removed and replaced with the driver's own).
 */
1648 ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
1650 struct ixl_pf *pf = iflib_get_softc(ctx);
1651 device_t dev = iflib_get_dev(ctx);
1653 struct ixl_vsi *pf_vsi;
1654 enum i40e_status_code ret;
/* M_NOWAIT: this path must not sleep waiting for memory. */
1660 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1662 if (pf->vfs == NULL) {
1668 * Add the VEB and ...
1669 * - do nothing: VEPA mode
1670 * - enable loopback mode on connected VSIs: VEB mode
1672 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1673 1, FALSE, &pf->veb_seid, FALSE, NULL);
1674 if (ret != I40E_SUCCESS) {
1675 error = hw->aq.asq_last_status;
1676 device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
1677 i40e_stat_str(hw, ret), i40e_aq_str(hw, error));
1680 if (pf->enable_vf_loopback)
1681 ixl_config_pf_vsi_loopback(pf, true);
1684 * Adding a VEB brings back the default MAC filter(s). Remove them,
1685 * and let the driver add the proper filters back.
1687 ixl_del_default_hw_filters(pf_vsi);
1688 ixl_reconfigure_filters(pf_vsi);
1690 pf->num_vfs = num_vfs;
/* Error path: release the VF array allocated above. */
1694 free(pf->vfs, M_IXL);
/*
 * iflib IFDI_IOV_UNINIT handler: tear down SR-IOV support.
 *
 * For each VF: delete its VSI element from the switch, release its queue
 * allocation back to the PF queue manager, and free its filter list.
 * Then delete the VEB, undo PF-VSI loopback if it was enabled, and free
 * per-VF sysctl contexts.
 */
1700 ixl_if_iov_uninit(if_ctx_t ctx)
1702 struct ixl_pf *pf = iflib_get_softc(ctx);
1709 for (i = 0; i < pf->num_vfs; i++) {
/* A seid of 0 means no VSI was ever created for this VF. */
1710 if (pf->vfs[i].vsi.seid != 0)
1711 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1712 ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1713 ixl_free_filters(&pf->vfs[i].vsi.ftl);
1714 ixl_dbg_iov(pf, "VF %d: %d released\n",
1715 i, pf->vfs[i].qtag.num_allocated);
1716 ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1719 if (pf->veb_seid != 0) {
1720 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1723 /* Reset PF VSI loopback mode */
1724 if (pf->enable_vf_loopback)
1725 ixl_config_pf_vsi_loopback(pf, false);
1728 num_vfs = pf->num_vfs;
1733 /* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
1734 for (i = 0; i < num_vfs; i++)
1735 sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
/*
 * Reserve hardware queues for a VF's VSI from the PF queue manager.
 *
 * Validates the administrator-requested queue count, clamping it to the
 * [1, IAVF_MAX_QUEUES] range (with a console warning), then allocates a
 * scattered set of queues and records the allocation in vf->qtag.
 *
 * Fix: the validation check used a hard-coded 16 where the clamp below
 * uses IAVF_MAX_QUEUES; use the macro in both places so they stay in sync.
 */
1740 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1742 device_t dev = pf->dev;
1745 /* Validate, and clamp value if invalid */
1746 if (num_queues < 1 || num_queues > IAVF_MAX_QUEUES)
1747 device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1748 num_queues, vf->vf_num);
1749 if (num_queues < 1) {
1750 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1752 } else if (num_queues > IAVF_MAX_QUEUES) {
1753 device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
1754 num_queues = IAVF_MAX_QUEUES;
1756 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1758 device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1759 num_queues, vf->vf_num);
1763 ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
1764 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1765 ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1771 ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
1773 struct ixl_pf *pf = iflib_get_softc(ctx);
1774 char sysctl_name[IXL_QUEUE_NAME_LEN];
1781 vf = &pf->vfs[vfnum];
1784 vf->vf_flags = VF_FLAG_ENABLED;
1786 /* Reserve queue allocation from PF */
1787 vf_num_queues = nvlist_get_number(params, "num-queues");
1788 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1792 error = ixl_vf_setup_vsi(pf, vf);
1796 if (nvlist_exists_binary(params, "mac-addr")) {
1797 mac = nvlist_get_binary(params, "mac-addr", &size);
1798 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1800 if (nvlist_get_bool(params, "allow-set-mac"))
1801 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1804 * If the administrator has not specified a MAC address then
1805 * we must allow the VF to choose one.
1807 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1809 if (nvlist_get_bool(params, "mac-anti-spoof"))
1810 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1812 if (nvlist_get_bool(params, "allow-promisc"))
1813 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1815 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1817 /* VF needs to be reset before it can be used */
1818 ixl_reset_vf(pf, vf);
1821 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1822 ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);