1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixl_pf_iov.h"
37 /* Private functions */
38 static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39 static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40 static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
42 static int ixl_vc_opcode_level(uint16_t opcode);
44 static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
46 static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
47 static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
48 static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
49 static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
50 static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
51 static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
53 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
54 static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
55 static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
56 static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
57 static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
58 static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
59 static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
60 static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
61 static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
62 static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63 static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
64 enum i40e_queue_type *last_type, uint16_t *last_queue);
65 static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
66 static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
67 static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
68 static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
69 static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
72 static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
75 static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
76 static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
77 static int ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);
79 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
82 * TODO: Move pieces of this into iflib and call the rest in a handler?
84 * e.g. ixl_if_iov_set_schema
86 * It's odd to do pci_iov_detach() there while doing pci_iov_attach()
/*
 * Register this PF with the pci_iov(9) framework: build the PF and VF
 * config schemas (MAC, anti-spoof, set-MAC/promisc permissions, queue
 * count bounded by the VF MSI-X vector count) and attach them.
 * On success, enables VC debug logging at level 1.
 */
90 ixl_initialize_sriov(struct ixl_pf *pf)
92 device_t dev = pf->dev;
93 struct i40e_hw *hw = &pf->hw;
94 nvlist_t *pf_schema, *vf_schema;
97 pf_schema = pci_iov_schema_alloc_node();
98 vf_schema = pci_iov_schema_alloc_node();
99 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
100 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
101 IOV_SCHEMA_HASDEFAULT, TRUE);
102 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
103 IOV_SCHEMA_HASDEFAULT, FALSE);
104 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
105 IOV_SCHEMA_HASDEFAULT, FALSE);
/* Default queue count: one MSI-X vector is reserved for the VF adminq */
106 pci_iov_schema_add_uint16(vf_schema, "num-queues",
107 IOV_SCHEMA_HASDEFAULT,
108 max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES)));
110 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
111 if (iov_error != 0) {
113 "Failed to initialize SR-IOV (error=%d)\n",
116 device_printf(dev, "SR-IOV ready\n");
118 pf->vc_debug_lvl = 1;
122 * Allocate the VSI for a VF.
/*
 * Build and add a VF-type VSI via the AdminQ: configures loopback
 * (if enabled), MAC anti-spoof, port-VLAN passthrough, and a
 * non-contiguous queue mapping from the VF's qtag allocation.
 * Returns 0 on success or an errno translated from the AQ status.
 */
125 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
130 struct i40e_vsi_context vsi_ctx;
132 enum i40e_status_code code;
138 vsi_ctx.pf_num = hw->pf_id;
139 vsi_ctx.uplink_seid = pf->veb_seid;
140 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
141 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
142 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
144 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
146 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
147 if (pf->enable_vf_loopback)
148 vsi_ctx.info.switch_id =
149 htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
151 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
152 vsi_ctx.info.sec_flags = 0;
153 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
154 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
156 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
157 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
158 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
160 vsi_ctx.info.valid_sections |=
161 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
162 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
164 /* XXX: Only scattered allocation is supported for VFs right now */
165 for (i = 0; i < vf->qtag.num_active; i++)
166 vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
/* Mark all remaining mapping slots as unused */
167 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
168 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
/* TC0 gets all queues; queue count is encoded as a power of two (fls) */
170 vsi_ctx.info.tc_mapping[0] = htole16(
171 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
172 ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
174 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
175 if (code != I40E_SUCCESS)
176 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
177 vf->vsi.seid = vsi_ctx.seid;
178 vf->vsi.vsi_num = vsi_ctx.vsi_number;
179 vf->vsi.num_rx_queues = vf->qtag.num_active;
180 vf->vsi.num_tx_queues = vf->qtag.num_active;
182 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
183 if (code != I40E_SUCCESS)
184 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Disable any bandwidth limit on the new VSI */
186 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
187 if (code != I40E_SUCCESS) {
188 device_printf(dev, "Failed to disable BW limit: %d\n",
189 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
190 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
193 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/*
 * Allocate the VF's VSI and finish software setup: init the filter
 * list, allow broadcast reception, and re-add any existing MAC/VLAN
 * filters (used after a VF reset).
 */
198 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
204 vf->vsi.flags |= IXL_FLAGS_IS_VF;
206 error = ixl_vf_alloc_vsi(pf, vf);
210 vf->vsi.dev = pf->dev;
212 ixl_init_filters(&vf->vsi);
213 /* Let VF receive broadcast Ethernet frames */
214 error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
216 device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
217 /* Re-add VF's MAC/VLAN filters to its VSI */
218 ixl_reconfigure_filters(&vf->vsi);
/*
 * Program one entry of the VSI queue table (VSILAN_QTABLE) with a
 * read-modify-write, preserving the other queue packed in the same
 * 32-bit register.
 */
224 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
231 * Two queues are mapped in a single register, so we have to do some
232 * gymnastics to convert the queue number into a register index and
236 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
238 qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
239 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
240 qtable |= val << shift;
241 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/*
 * Map the VF's queues into PF queue space: enable the QTABLE-based
 * (non-contiguous) mapping, enable LAN traffic for the VF, program
 * the per-VF VPLAN_QTABLE, then mirror the same mapping into the
 * VSI's VSILAN_QTABLE. Unused slots are marked with the index mask.
 */
245 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
254 * Contiguous mappings aren't actually supported by the hardware,
255 * so we have to use non-contiguous mappings.
257 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
258 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
260 /* Enable LAN traffic on this VF */
261 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
262 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
264 /* Program index of each VF queue into PF queue space
265 * (This is only needed if QTABLE is enabled) */
266 for (i = 0; i < vf->vsi.num_tx_queues; i++) {
267 qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
268 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
270 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
272 for (; i < IXL_MAX_VSI_QUEUES; i++)
273 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
274 I40E_VPLAN_QTABLE_QINDEX_MASK);
276 /* Map queues allocated to VF to its VSI;
277 * This mapping matches the VF-wide mapping since the VF
278 * is only given a single VSI */
279 for (i = 0; i < vf->vsi.num_tx_queues; i++)
280 ixl_vf_map_vsi_queue(hw, vf, i,
281 ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
283 /* Set rest of VSI queues as unused. */
284 for (; i < IXL_MAX_VSI_QUEUES; i++)
285 ixl_vf_map_vsi_queue(hw, vf, i,
286 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/* Delete the VF's VSI switch element via the AdminQ. */
292 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
301 i40e_aq_delete_element(hw, vsi->seid, NULL);
/* Disable a VF queue interrupt by clearing its pending-bit array entry. */
305 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
308 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/* Terminate the interrupt linked list for a VF vector (all-ones = end). */
313 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
316 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
317 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/*
 * Tear down a VF's resources: release its VSI, disable every VF
 * queue interrupt, unhook every interrupt linked list, and zero the
 * software queue counts. Vector 0 uses dedicated registers
 * (DYN_CTL0 / LNKLST0); vectors 1..N use the N-indexed forms.
 */
322 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
325 uint32_t vfint_reg, vpint_reg;
330 ixl_vf_vsi_release(pf, &vf->vsi);
332 /* Index 0 has a special register. */
333 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
335 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
336 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
337 ixl_vf_disable_queue_intr(hw, vfint_reg);
340 /* Index 0 has a special register. */
341 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
343 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
344 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
345 ixl_vf_unregister_intr(hw, vpint_reg);
348 vf->vsi.num_tx_queues = 0;
349 vf->vsi.num_rx_queues = 0;
/*
 * Poll (via the PF PCI config-indirect access registers) until the
 * VF reports no pending PCIe transactions, or the reset timeout
 * elapses.
 */
353 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
357 uint16_t global_vf_num;
361 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
363 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
364 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
365 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
366 ciad = rd32(hw, I40E_PF_PCI_CIAD);
367 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/*
 * Trigger a software reset of the VF by setting VFSWR in
 * VPGEN_VFRTRIG, then reinitialize it via ixl_reinit_vf().
 */
376 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
383 ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);
385 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
386 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
387 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
390 ixl_reinit_vf(pf, vf);
392 ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
/*
 * Complete a VF reset: flush pending PCIe transactions, wait for the
 * hardware reset-done bit (VFRD), report COMPLETED to the VF, clear
 * the reset trigger, tear down rings/resources, and rebuild the
 * VSI and queue mappings before marking the VF active again.
 */
396 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
399 uint32_t vfrstat, vfrtrig;
404 error = ixl_flush_pcie(pf, vf);
406 device_printf(pf->dev,
407 "Timed out waiting for PCIe activity to stop on VF-%d\n",
410 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
413 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
414 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
418 if (i == IXL_VF_RESET_TIMEOUT)
419 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
421 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
423 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
424 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
425 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
427 if (vf->vsi.seid != 0)
428 ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
429 ixl_pf_qmgr_clear_queue_flags(&vf->qtag);
431 ixl_vf_release_resources(pf, vf);
432 ixl_vf_setup_vsi(pf, vf);
433 ixl_vf_map_queues(pf, vf);
435 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
/*
 * Return the debug verbosity level for a virtchnl opcode
 * (GET_STATS is chatty, so it is special-cased).
 */
440 ixl_vc_opcode_level(uint16_t opcode)
443 case VIRTCHNL_OP_GET_STATS:
/*
 * Send a virtchnl message to a VF through the AdminQ, logging it at
 * the opcode's debug level. The global VF id is the function-caps
 * base id plus this VF's index.
 */
451 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
452 enum i40e_status_code status, void *msg, uint16_t len)
458 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
460 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
461 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
462 ixl_vc_opcode_str(op), op, status, vf->vf_num);
464 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* ACK a VF request: success status, no payload. */
468 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
471 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/*
 * NACK a VF request with an error status, logging the originating
 * file/line (callers pass __FILE__/__LINE__ via a wrapper macro).
 */
475 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
476 enum i40e_status_code status, const char *file, int line)
480 "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
481 ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
482 status, vf->vf_num, file, line);
483 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/*
 * Handle VIRTCHNL_OP_VERSION: record the API version to speak with
 * this VF (1.0 VFs get 1.0 back; otherwise the PF's version), warn
 * on a mismatch, and reply with the negotiated version.
 */
487 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
490 struct virtchnl_version_info *recv_vf_version;
491 device_t dev = pf->dev;
493 recv_vf_version = (struct virtchnl_version_info *)msg;
495 /* VFs running the 1.0 API expect to get 1.0 back */
496 if (VF_IS_V10(recv_vf_version)) {
497 vf->version.major = 1;
498 vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
500 vf->version.major = VIRTCHNL_VERSION_MAJOR;
501 vf->version.minor = VIRTCHNL_VERSION_MINOR;
/* Informational only -- a mismatch is logged but still answered */
503 if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
504 (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
506 "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
507 __func__, vf->vf_num,
508 recv_vf_version->major, recv_vf_version->minor,
509 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
512 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
513 &vf->version, sizeof(vf->version));
/* Handle VIRTCHNL_OP_RESET_VF: reset the VF; the protocol sends no reply. */
517 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
520 ixl_reset_vf(pf, vf);
522 /* No response to a reset message. */
/*
 * Handle VIRTCHNL_OP_GET_VF_RESOURCES: reply with the VF's
 * capability flags, queue/vector counts, RSS sizes, and its single
 * SR-IOV VSI (including the assigned MAC address).
 */
526 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
529 struct virtchnl_vf_resource reply;
531 bzero(&reply, sizeof(reply));
/* 1.0-API VFs get a fixed capability set; 1.1+ VFs get their
 * requested caps masked down (RSS forced to PF-managed). */
533 if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
534 reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
535 VIRTCHNL_VF_OFFLOAD_RSS_REG |
536 VIRTCHNL_VF_OFFLOAD_VLAN;
538 /* Force VF RSS setup by PF in 1.1+ VFs */
539 reply.vf_cap_flags = *(u32 *)msg & (
540 VIRTCHNL_VF_OFFLOAD_L2 |
541 VIRTCHNL_VF_OFFLOAD_RSS_PF |
542 VIRTCHNL_VF_OFFLOAD_VLAN);
545 reply.num_queue_pairs = vf->vsi.num_tx_queues;
546 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
547 reply.rss_key_size = 52;
548 reply.rss_lut_size = 64;
549 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
550 reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
551 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
552 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
554 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
555 I40E_SUCCESS, &reply, sizeof(reply));
/*
 * Program the HMC TX queue context for one VF queue: translate the
 * VF-relative queue id to the global PF queue, clear and set the
 * LAN TX context, bind the queue to the VF via QTX_CTL, and mark it
 * configured in the queue manager. Returns 0 on success.
 */
559 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
560 struct virtchnl_txq_info *info)
563 struct i40e_hmc_obj_txq txq;
564 uint16_t global_queue_num, global_vf_num;
565 enum i40e_status_code status;
569 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
570 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
571 bzero(&txq, sizeof(txq));
573 DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
574 vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
576 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
577 if (status != I40E_SUCCESS)
/* Ring base is programmed in context-base units */
580 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
582 txq.head_wb_ena = info->headwb_enabled;
583 txq.head_wb_addr = info->dma_headwb_addr;
584 txq.qlen = info->ring_len;
585 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
588 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
589 if (status != I40E_SUCCESS)
592 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
593 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
594 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
595 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
598 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
/*
 * Program the HMC RX queue context for one VF queue. Validates the
 * buffer size, max packet size, and (if header split is requested)
 * the header buffer size before clearing and setting the LAN RX
 * context, then marks the queue configured. Returns 0 on success.
 */
604 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
605 struct virtchnl_rxq_info *info)
608 struct i40e_hmc_obj_rxq rxq;
609 uint16_t global_queue_num;
610 enum i40e_status_code status;
613 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
614 bzero(&rxq, sizeof(rxq));
616 DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
617 vf->vf_num, global_queue_num, info->queue_id);
619 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
622 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
623 info->max_pkt_size < ETHER_MIN_LEN)
626 if (info->splithdr_enabled) {
627 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
/* Mask the split position to the supported L2/IP/TCP-UDP/SCTP bits */
630 rxq.hsplit_0 = info->rx_split_pos &
631 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
632 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
633 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
634 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
635 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
640 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
641 if (status != I40E_SUCCESS)
644 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
645 rxq.qlen = info->ring_len;
647 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
653 rxq.rxmax = info->max_pkt_size;
654 rxq.tphrdesc_ena = 1;
655 rxq.tphwdesc_ena = 1;
661 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
662 if (status != I40E_SUCCESS)
665 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the queue-pair
 * count and VSI id, then configure each TX/RX pair. Any invalid
 * pair (wrong VSI, mismatched tx/rx ids, out-of-range id) or a
 * failed queue config NACKs the request; otherwise ACK.
 */
671 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
674 struct virtchnl_vsi_queue_config_info *info;
675 struct virtchnl_queue_pair_info *pair;
679 if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
680 device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
681 vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
682 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
687 if (info->vsi_id != vf->vsi.vsi_num) {
688 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
689 vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
690 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
695 for (i = 0; i < info->num_queue_pairs; i++) {
696 pair = &info->qpair[i];
698 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
699 pair->rxq.vsi_id != vf->vsi.vsi_num ||
700 pair->txq.queue_id != pair->rxq.queue_id ||
701 pair->txq.queue_id >= vf->vsi.num_tx_queues) {
703 i40e_send_vf_nack(pf, vf,
704 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
708 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
709 i40e_send_vf_nack(pf, vf,
710 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
714 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
715 i40e_send_vf_nack(pf, vf,
716 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
721 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/*
 * Link one queue into the VF's interrupt linked list by programming
 * its QINT_RQCTL/TQCTL register: vector id, ITR index, and a
 * pointer to the previously-linked queue (*last_type/*last_queue),
 * which are updated to the current queue on return.
 */
725 ixl_vf_set_qctl(struct ixl_pf *pf,
726 const struct virtchnl_vector_map *vector,
727 enum i40e_queue_type cur_type, uint16_t cur_queue,
728 enum i40e_queue_type *last_type, uint16_t *last_queue)
730 uint32_t offset, qctl;
733 if (cur_type == I40E_QUEUE_TYPE_RX) {
734 offset = I40E_QINT_RQCTL(cur_queue);
735 itr_indx = vector->rxitr_idx;
737 offset = I40E_QINT_TQCTL(cur_queue);
738 itr_indx = vector->txitr_idx;
/* RQCTL and TQCTL share the same field layout, so RQCTL shifts
 * are used for both register types */
741 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
742 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
743 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
744 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
745 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
747 wr32(&pf->hw, offset, qctl);
749 *last_type = cur_type;
750 *last_queue = cur_queue;
/*
 * Build the interrupt linked list for one VF vector: walk the TX
 * and RX queue bitmaps (interleaved, per datasheet guidance),
 * chaining each queue via ixl_vf_set_qctl(), then program the list
 * head into LNKLST0 (vector 0) or LNKLSTN (vectors 1..N).
 */
754 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
755 const struct virtchnl_vector_map *vector)
759 enum i40e_queue_type type, last_type;
761 uint16_t rxq_map, txq_map, cur_queue, last_queue;
765 rxq_map = vector->rxq_map;
766 txq_map = vector->txq_map;
/* Start from the end-of-list marker; the list is built backwards */
768 last_queue = IXL_END_OF_INTR_LNKLST;
769 last_type = I40E_QUEUE_TYPE_RX;
772 * The datasheet says to optimize performance, RX queues and TX queues
773 * should be interleaved in the interrupt linked list, so we process
776 while ((rxq_map != 0) || (txq_map != 0)) {
778 qindex = ffs(txq_map) - 1;
779 type = I40E_QUEUE_TYPE_TX;
780 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
781 ixl_vf_set_qctl(pf, vector, type, cur_queue,
782 &last_type, &last_queue);
783 txq_map &= ~(1 << qindex);
787 qindex = ffs(rxq_map) - 1;
788 type = I40E_QUEUE_TYPE_RX;
789 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
790 ixl_vf_set_qctl(pf, vector, type, cur_queue,
791 &last_type, &last_queue);
792 rxq_map &= ~(1 << qindex);
796 if (vector->vector_id == 0)
797 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
799 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
802 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
803 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/*
 * Handle VIRTCHNL_OP_CONFIG_IRQ_MAP: for each vector map, validate
 * the vector id, VSI id, queue bitmaps (against the VF's queue
 * counts), and ITR indices, then program the vector via
 * ixl_vf_config_vector(). NACK on any invalid entry; ACK otherwise.
 */
809 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
812 struct virtchnl_irq_map_info *map;
813 struct virtchnl_vector_map *vector;
815 int i, largest_txq, largest_rxq;
820 for (i = 0; i < map->num_vectors; i++) {
821 vector = &map->vecmap[i];
823 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
824 vector->vsi_id != vf->vsi.vsi_num) {
825 i40e_send_vf_nack(pf, vf,
826 VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
830 if (vector->rxq_map != 0) {
831 largest_rxq = fls(vector->rxq_map) - 1;
832 if (largest_rxq >= vf->vsi.num_rx_queues) {
833 i40e_send_vf_nack(pf, vf,
834 VIRTCHNL_OP_CONFIG_IRQ_MAP,
840 if (vector->txq_map != 0) {
841 largest_txq = fls(vector->txq_map) - 1;
842 if (largest_txq >= vf->vsi.num_tx_queues) {
843 i40e_send_vf_nack(pf, vf,
844 VIRTCHNL_OP_CONFIG_IRQ_MAP,
850 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
851 vector->txitr_idx > IXL_MAX_ITR_IDX) {
852 i40e_send_vf_nack(pf, vf,
853 VIRTCHNL_OP_CONFIG_IRQ_MAP,
858 ixl_vf_config_vector(pf, vf, vector);
861 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
/*
 * Handle VIRTCHNL_OP_ENABLE_QUEUES: for each queue selected in the
 * TX and RX bitmaps, skip unconfigured queues, warn on out-of-range
 * or already-enabled queues, and enable the ring. NACK if the VSI
 * id is wrong or either bitmap is empty; ACK on success.
 */
865 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
868 struct virtchnl_queue_select *select;
873 if (select->vsi_id != vf->vsi.vsi_num ||
874 select->rx_queues == 0 || select->tx_queues == 0) {
875 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
880 /* Enable TX rings selected by the VF */
881 for (int i = 0; i < 32; i++) {
882 if ((1 << i) & select->tx_queues) {
883 /* Warn if queue is out of VF allocation range */
884 if (i >= vf->vsi.num_tx_queues) {
885 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
889 /* Skip this queue if it hasn't been configured */
890 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
892 /* Warn if this queue is already marked as enabled */
893 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
894 ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",
897 error = ixl_enable_tx_ring(pf, &vf->qtag, i);
901 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
905 /* Enable RX rings selected by the VF */
906 for (int i = 0; i < 32; i++) {
907 if ((1 << i) & select->rx_queues) {
908 /* Warn if queue is out of VF allocation range */
909 if (i >= vf->vsi.num_rx_queues) {
910 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
914 /* Skip this queue if it hasn't been configured */
915 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
917 /* Warn if this queue is already marked as enabled */
918 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
919 ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
921 error = ixl_enable_rx_ring(pf, &vf->qtag, i);
925 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
930 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
935 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
/*
 * Handle VIRTCHNL_OP_DISABLE_QUEUES: mirror image of the enable
 * path -- validate VSI id and non-empty bitmaps, then disable each
 * selected, configured TX/RX ring and mark it disabled. NACK on
 * validation or ring-disable failure; ACK on success.
 */
939 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
940 void *msg, uint16_t msg_size)
942 struct virtchnl_queue_select *select;
947 if (select->vsi_id != vf->vsi.vsi_num ||
948 select->rx_queues == 0 || select->tx_queues == 0) {
949 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
954 /* Disable TX rings selected by the VF */
955 for (int i = 0; i < 32; i++) {
956 if ((1 << i) & select->tx_queues) {
957 /* Warn if queue is out of VF allocation range */
958 if (i >= vf->vsi.num_tx_queues) {
959 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
963 /* Skip this queue if it hasn't been configured */
964 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
966 /* Warn if this queue is already marked as disabled */
967 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
968 ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",
972 error = ixl_disable_tx_ring(pf, &vf->qtag, i);
976 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
/* NOTE(review): comment says "Enable" but this loop disables RX rings */
980 /* Enable RX rings selected by the VF */
981 for (int i = 0; i < 32; i++) {
982 if ((1 << i) & select->rx_queues) {
983 /* Warn if queue is out of VF allocation range */
984 if (i >= vf->vsi.num_rx_queues) {
985 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
989 /* Skip this queue if it hasn't been configured */
990 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
992 /* Warn if this queue is already marked as disabled */
993 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
994 ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",
998 error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1002 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1007 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1012 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
/*
 * Validate a MAC address a VF wants to filter on: reject zero and
 * broadcast addresses, and -- unless the VF has the set-MAC
 * capability -- reject unicast addresses other than its assigned MAC.
 */
1016 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1019 if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
1023 * If the VF is not allowed to change its MAC address, don't let it
1024 * set a MAC filter for an address that is not a multicast address and
1025 * is not its assigned MAC.
1027 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1028 !(ETHER_IS_MULTICAST(addr) || !ixl_ether_is_equal(addr, vf->mac)))
/*
 * Handle VIRTCHNL_OP_ADD_ETH_ADDR: verify the VSI id and validate
 * every address first, then add a VLAN-any filter for each. Any
 * invalid address NACKs the whole request before filters are added.
 */
1035 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1038 struct virtchnl_ether_addr_list *addr_list;
1039 struct virtchnl_ether_addr *addr;
1040 struct ixl_vsi *vsi;
1046 if (addr_list->vsi_id != vsi->vsi_num) {
1047 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1052 for (i = 0; i < addr_list->num_elements; i++) {
1053 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1054 i40e_send_vf_nack(pf, vf,
1055 VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
1060 for (i = 0; i < addr_list->num_elements; i++) {
1061 addr = &addr_list->list[i];
1062 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1065 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
/*
 * Handle VIRTCHNL_OP_DEL_ETH_ADDR: verify the VSI id, reject zero
 * and broadcast addresses up front, then remove the VLAN-any filter
 * for each listed address and ACK.
 */
1069 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1072 struct virtchnl_ether_addr_list *addr_list;
1073 struct virtchnl_ether_addr *addr;
1074 struct ixl_vsi *vsi;
1080 if (addr_list->vsi_id != vsi->vsi_num) {
1081 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1086 for (i = 0; i < addr_list->num_elements; i++) {
1087 addr = &addr_list->list[i];
1088 if (ETHER_IS_ZERO(addr->addr) || ETHER_IS_BROADCAST(addr->addr)) {
1089 i40e_send_vf_nack(pf, vf,
1090 VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
1095 for (i = 0; i < addr_list->num_elements; i++) {
1096 addr = &addr_list->list[i];
1097 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1100 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
/*
 * Enable VLAN stripping on the VF's VSI by updating its VLAN
 * properties (PVLAN mode ALL, strip-both emulation) via the AdminQ.
 */
1103 static enum i40e_status_code
1104 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1106 struct i40e_vsi_context vsi_ctx;
1108 vsi_ctx.seid = vf->vsi.seid;
1110 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1111 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1112 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1113 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1114 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/*
 * Handle VIRTCHNL_OP_ADD_VLAN: verify VSI id, VLAN capability, and
 * each VLAN id (<= EVL_VLID_MASK); enable VLAN stripping on the
 * VSI, then add a filter for the VF's MAC on each VLAN and ACK.
 */
1118 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1121 struct virtchnl_vlan_filter_list *filter_list;
1122 enum i40e_status_code code;
1127 if (filter_list->vsi_id != vf->vsi.vsi_num) {
1128 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1133 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1134 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1139 for (i = 0; i < filter_list->num_elements; i++) {
1140 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1141 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1147 code = ixl_vf_enable_vlan_strip(pf, vf);
1148 if (code != I40E_SUCCESS) {
1149 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1153 for (i = 0; i < filter_list->num_elements; i++)
1154 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1156 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
/*
 * Handle VIRTCHNL_OP_DEL_VLAN: verify VSI id, VLAN ids, and VLAN
 * capability, then remove the filter for the VF's MAC on each
 * listed VLAN and ACK.
 */
1160 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1163 struct virtchnl_vlan_filter_list *filter_list;
1168 if (filter_list->vsi_id != vf->vsi.vsi_num) {
1169 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1174 for (i = 0; i < filter_list->num_elements; i++) {
1175 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
/* NOTE(review): this NACK (and the one below) uses
 * VIRTCHNL_OP_ADD_VLAN in the delete path; looks like a
 * copy-paste from ixl_vf_add_vlan_msg -- confirm whether
 * VIRTCHNL_OP_DEL_VLAN was intended. */
1176 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1182 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1183 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1188 for (i = 0; i < filter_list->num_elements; i++)
1189 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1191 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
/*
 * Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: if the VF lacks the
 * promisc capability, ACK without doing anything (matching the
 * Linux PF driver); otherwise validate the VSI id and program
 * unicast and multicast promiscuous via the AdminQ, NACKing (with
 * a logged AQ error) on failure.
 */
1195 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1196 void *msg, uint16_t msg_size)
1198 struct virtchnl_promisc_info *info;
1199 struct i40e_hw *hw = &pf->hw;
1200 enum i40e_status_code code;
1202 if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1204 * Do the same thing as the Linux PF driver -- lie to the VF
1206 ixl_send_vf_ack(pf, vf,
1207 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1212 if (info->vsi_id != vf->vsi.vsi_num) {
1213 i40e_send_vf_nack(pf, vf,
1214 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1218 code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
1219 info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1220 if (code != I40E_SUCCESS) {
1221 device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
1222 " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1223 i40e_aq_str(hw, hw->aq.asq_last_status));
1224 i40e_send_vf_nack(pf, vf,
1225 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1229 code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
1230 info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
1231 if (code != I40E_SUCCESS) {
1232 device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
1233 " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1234 i40e_aq_str(hw, hw->aq.asq_last_status));
1235 i40e_send_vf_nack(pf, vf,
1236 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1240 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1244 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1247 struct virtchnl_queue_select *queue;
1250 if (queue->vsi_id != vf->vsi.vsi_num) {
1251 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1256 ixl_update_eth_stats(&vf->vsi);
1258 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
1259 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1263 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1267 struct virtchnl_rss_key *key;
1268 struct i40e_aqc_get_set_rss_key_data key_data;
1269 enum i40e_status_code status;
1275 if (key->key_len > 52) {
1276 device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1277 vf->vf_num, key->key_len, 52);
1278 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1283 if (key->vsi_id != vf->vsi.vsi_num) {
1284 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1285 vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1286 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1291 /* Fill out hash using MAC-dependent method */
1292 if (hw->mac.type == I40E_MAC_X722) {
1293 bzero(&key_data, sizeof(key_data));
1294 if (key->key_len <= 40)
1295 bcopy(key->key, key_data.standard_rss_key, key->key_len);
1297 bcopy(key->key, key_data.standard_rss_key, 40);
1298 bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1300 status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1302 device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1303 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1304 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1305 I40E_ERR_ADMIN_QUEUE_ERROR);
1309 for (int i = 0; i < (key->key_len / 4); i++)
1310 i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1313 DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1314 vf->vf_num, key->key[0]);
1316 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
1320 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1324 struct virtchnl_rss_lut *lut;
1325 enum i40e_status_code status;
1331 if (lut->lut_entries > 64) {
1332 device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1333 vf->vf_num, lut->lut_entries, 64);
1334 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1339 if (lut->vsi_id != vf->vsi.vsi_num) {
1340 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1341 vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1342 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1347 /* Fill out LUT using MAC-dependent method */
1348 if (hw->mac.type == I40E_MAC_X722) {
1349 status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1351 device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1352 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1353 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1354 I40E_ERR_ADMIN_QUEUE_ERROR);
1358 for (int i = 0; i < (lut->lut_entries / 4); i++)
1359 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1362 DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1363 vf->vf_num, lut->lut[0], lut->lut_entries);
1365 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
1369 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1373 struct virtchnl_rss_hena *hena;
1379 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1380 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1382 DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1383 vf->vf_num, hena->hena);
1385 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
1389 ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
1391 struct virtchnl_pf_event event;
1395 event.event = VIRTCHNL_EVENT_LINK_CHANGE;
1396 event.severity = PF_EVENT_SEVERITY_INFO;
1397 event.event_data.link_event.link_status = pf->vsi.link_active;
1398 event.event_data.link_event.link_speed =
1399 i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);
1401 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
1406 ixl_broadcast_link_state(struct ixl_pf *pf)
1410 for (i = 0; i < pf->num_vfs; i++)
1411 ixl_notify_vf_link_state(pf, &pf->vfs[i]);
1415 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1417 device_t dev = pf->dev;
1419 uint16_t vf_num, msg_size;
1424 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1425 opcode = le32toh(event->desc.cookie_high);
1427 if (vf_num >= pf->num_vfs) {
1428 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1432 vf = &pf->vfs[vf_num];
1433 msg = event->msg_buf;
1434 msg_size = event->msg_len;
1436 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1437 "Got msg %s(%d) from%sVF-%d of size %d\n",
1438 ixl_vc_opcode_str(opcode), opcode,
1439 (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1442 /* Perform basic checks on the msg */
1443 err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
1445 device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
1446 __func__, vf->vf_num, opcode, msg_size, err);
1447 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
1451 /* This must be a stray msg from a previously destroyed VF. */
1452 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1456 case VIRTCHNL_OP_VERSION:
1457 ixl_vf_version_msg(pf, vf, msg, msg_size);
1459 case VIRTCHNL_OP_RESET_VF:
1460 ixl_vf_reset_msg(pf, vf, msg, msg_size);
1462 case VIRTCHNL_OP_GET_VF_RESOURCES:
1463 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1464 /* Notify VF of link state after it obtains queues, as this is
1465 * the last thing it will do as part of initialization
1467 ixl_notify_vf_link_state(pf, vf);
1469 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1470 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1472 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1473 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1475 case VIRTCHNL_OP_ENABLE_QUEUES:
1476 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1477 /* Notify VF of link state after it obtains queues, as this is
1478 * the last thing it will do as part of initialization
1480 ixl_notify_vf_link_state(pf, vf);
1482 case VIRTCHNL_OP_DISABLE_QUEUES:
1483 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1485 case VIRTCHNL_OP_ADD_ETH_ADDR:
1486 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1488 case VIRTCHNL_OP_DEL_ETH_ADDR:
1489 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1491 case VIRTCHNL_OP_ADD_VLAN:
1492 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1494 case VIRTCHNL_OP_DEL_VLAN:
1495 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1497 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1498 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1500 case VIRTCHNL_OP_GET_STATS:
1501 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1503 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1504 ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1506 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1507 ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1509 case VIRTCHNL_OP_SET_RSS_HENA:
1510 ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1513 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1514 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1515 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1517 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1522 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
1524 ixl_handle_vflr(struct ixl_pf *pf)
1528 uint16_t global_vf_num;
1529 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1534 ixl_dbg_iov(pf, "%s: begin\n", __func__);
1536 /* Re-enable VFLR interrupt cause so driver doesn't miss a
1537 * reset interrupt for another VF */
1538 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1539 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1540 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1543 for (i = 0; i < pf->num_vfs; i++) {
1544 global_vf_num = hw->func_caps.vf_base_id + i;
1547 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1550 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1551 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1552 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1553 if (vflrstat & vflrstat_mask) {
1554 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1557 ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
1558 ixl_reinit_vf(pf, vf);
1559 ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
1566 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1570 case I40E_AQ_RC_EPERM:
1572 case I40E_AQ_RC_ENOENT:
1574 case I40E_AQ_RC_ESRCH:
1576 case I40E_AQ_RC_EINTR:
1578 case I40E_AQ_RC_EIO:
1580 case I40E_AQ_RC_ENXIO:
1582 case I40E_AQ_RC_E2BIG:
1584 case I40E_AQ_RC_EAGAIN:
1586 case I40E_AQ_RC_ENOMEM:
1588 case I40E_AQ_RC_EACCES:
1590 case I40E_AQ_RC_EFAULT:
1592 case I40E_AQ_RC_EBUSY:
1594 case I40E_AQ_RC_EEXIST:
1596 case I40E_AQ_RC_EINVAL:
1598 case I40E_AQ_RC_ENOTTY:
1600 case I40E_AQ_RC_ENOSPC:
1602 case I40E_AQ_RC_ENOSYS:
1604 case I40E_AQ_RC_ERANGE:
1606 case I40E_AQ_RC_EFLUSHED:
1607 return (EINVAL); /* No exact equivalent in errno.h */
1608 case I40E_AQ_RC_BAD_ADDR:
1610 case I40E_AQ_RC_EMODE:
1612 case I40E_AQ_RC_EFBIG:
1620 ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
1622 struct i40e_hw *hw = &pf->hw;
1623 device_t dev = pf->dev;
1624 struct ixl_vsi *vsi = &pf->vsi;
1625 struct i40e_vsi_context ctxt;
1628 memset(&ctxt, 0, sizeof(ctxt));
1630 ctxt.seid = vsi->seid;
1631 if (pf->veb_seid != 0)
1632 ctxt.uplink_seid = pf->veb_seid;
1633 ctxt.pf_num = hw->pf_id;
1634 ctxt.connection_type = IXL_VSI_DATA_PORT;
1636 ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
1637 ctxt.info.switch_id = (enable) ?
1638 htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;
1640 /* error is set to 0 on success */
1641 error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1643 device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1644 " aq_error %d\n", error, hw->aq.asq_last_status);
1651 ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
1653 struct ixl_pf *pf = iflib_get_softc(ctx);
1654 device_t dev = iflib_get_dev(ctx);
1656 struct ixl_vsi *pf_vsi;
1657 enum i40e_status_code ret;
1663 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1665 if (pf->vfs == NULL) {
1671 * Add the VEB and ...
1672 * - do nothing: VEPA mode
1673 * - enable loopback mode on connected VSIs: VEB mode
1675 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1676 1, FALSE, &pf->veb_seid, FALSE, NULL);
1677 if (ret != I40E_SUCCESS) {
1678 error = hw->aq.asq_last_status;
1679 device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
1680 i40e_stat_str(hw, ret), i40e_aq_str(hw, error));
1683 if (pf->enable_vf_loopback)
1684 ixl_config_pf_vsi_loopback(pf, true);
1687 * Adding a VEB brings back the default MAC filter(s). Remove them,
1688 * and let the driver add the proper filters back.
1690 ixl_del_default_hw_filters(pf_vsi);
1691 ixl_reconfigure_filters(pf_vsi);
1693 pf->num_vfs = num_vfs;
1697 free(pf->vfs, M_IXL);
1703 ixl_if_iov_uninit(if_ctx_t ctx)
1705 struct ixl_pf *pf = iflib_get_softc(ctx);
1707 struct ixl_vsi *vsi;
1716 for (i = 0; i < pf->num_vfs; i++) {
1717 if (pf->vfs[i].vsi.seid != 0)
1718 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1719 ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1720 ixl_free_filters(&pf->vfs[i].vsi.ftl);
1721 ixl_dbg_iov(pf, "VF %d: %d released\n",
1722 i, pf->vfs[i].qtag.num_allocated);
1723 ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1726 if (pf->veb_seid != 0) {
1727 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1730 /* Reset PF VSI loopback mode */
1731 if (pf->enable_vf_loopback)
1732 ixl_config_pf_vsi_loopback(pf, false);
1735 num_vfs = pf->num_vfs;
1740 /* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
1741 for (i = 0; i < num_vfs; i++)
1742 sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
1747 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1749 device_t dev = pf->dev;
1752 /* Validate, and clamp value if invalid */
1753 if (num_queues < 1 || num_queues > 16)
1754 device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1755 num_queues, vf->vf_num);
1756 if (num_queues < 1) {
1757 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1759 } else if (num_queues > IAVF_MAX_QUEUES) {
1760 device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
1761 num_queues = IAVF_MAX_QUEUES;
1763 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1765 device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1766 num_queues, vf->vf_num);
1770 ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
1771 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1772 ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1778 ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
1780 struct ixl_pf *pf = iflib_get_softc(ctx);
1781 char sysctl_name[IXL_QUEUE_NAME_LEN];
1788 vf = &pf->vfs[vfnum];
1791 vf->vf_flags = VF_FLAG_ENABLED;
1793 /* Reserve queue allocation from PF */
1794 vf_num_queues = nvlist_get_number(params, "num-queues");
1795 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1799 error = ixl_vf_setup_vsi(pf, vf);
1803 if (nvlist_exists_binary(params, "mac-addr")) {
1804 mac = nvlist_get_binary(params, "mac-addr", &size);
1805 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1807 if (nvlist_get_bool(params, "allow-set-mac"))
1808 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1811 * If the administrator has not specified a MAC address then
1812 * we must allow the VF to choose one.
1814 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1816 if (nvlist_get_bool(params, "mac-anti-spoof"))
1817 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1819 if (nvlist_get_bool(params, "allow-promisc"))
1820 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1822 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1824 /* VF needs to be reset before it can be used */
1825 ixl_reset_vf(pf, vf);
1828 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1829 ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);