/******************************************************************************

  Copyright (c) 2013-2019, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixl_pf_iov.h"

/* Private functions */
static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);

static bool ixl_zero_mac(const uint8_t *addr);
static bool ixl_bcast_mac(const uint8_t *addr);

static int ixl_vc_opcode_level(uint16_t opcode);

static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);

static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue);
static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);

static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

void
ixl_initialize_sriov(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	nvlist_t *pf_schema, *vf_schema;
	int iov_error;

	/* SR-IOV is only supported when MSI-X is in use. */

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
	    IOV_SCHEMA_HASDEFAULT,
	    max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (iov_error != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    iov_error);
	} else
		device_printf(dev, "SR-IOV ready\n");

	pf->vc_debug_lvl = 1;
}
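
/*
 * A worked example of the "num-queues" schema default above (the vector
 * count is illustrative, not read from hardware): a typical i40e VF
 * exposes num_msix_vectors_vf == 5, so the default evaluates to
 * max(1, 5 - 1) % IXLV_MAX_QUEUES == 4 queue pairs.
 */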

/*
 * Allocate the VSI for a VF.
 */
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	device_t dev;
	struct i40e_hw *hw;
	struct i40e_vsi_context vsi_ctx;
	int i;
	enum i40e_status_code code;

	hw = &pf->hw;
	dev = pf->dev;

	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	vsi_ctx.info.switch_id = htole16(0);

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);

	/* ERJ: Only scattered allocation is supported for VFs right now */
	for (i = 0; i < vf->qtag.num_active; i++)
		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	vf->vsi.num_queues = vf->qtag.num_active;

	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	}

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
	return (0);
}
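
/*
 * Note on the tc_mapping encoding above: the TC_QUE_NUMBER field takes the
 * queue count for TC0 as a power-of-two exponent, which is why bsrl()
 * (index of the highest set bit) is used rather than the raw count;
 * qtag.num_allocated is expected to be a power of two here.
 */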

static int
ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	int error;

	vf->vsi.flags |= IXL_FLAGS_IS_VF;

	error = ixl_vf_alloc_vsi(pf, vf);
	if (error != 0)
		return (error);

	vf->vsi.dev = pf->dev;
	vf->vsi.num_hw_filters = 0;

	ixl_init_filters(&vf->vsi);

	return (0);
}

static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
    uint32_t val)
{
	uint32_t qtable;
	int index, shift;

	/*
	 * Two queues are mapped in a single register, so we have to do some
	 * gymnastics to convert the queue number into a register index and
	 * shift.
	 */
	index = qnum / 2;
	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;

	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
	qtable |= val << shift;
	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}
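
/*
 * Example of the index/shift math above: qnum == 5 selects VSILAN_QTABLE
 * register 2 (5 / 2) and the odd-queue field (shift ==
 * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT), while qnum == 4 shares the same
 * register at shift 0.
 */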

static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int i;
	uint32_t qtable;

	hw = &pf->hw;

	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	    I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable LAN traffic on this VF */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Program index of each VF queue into PF queue space
	 * (This is only needed if QTABLE is enabled) */
	for (i = 0; i < vf->vsi.num_queues; i++) {
		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	}
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
		    I40E_VPLAN_QTABLE_QINDEX_MASK);

	/* Map queues allocated to VF to its VSI;
	 * This mapping matches the VF-wide mapping since the VF
	 * is only given a single VSI */
	for (i = 0; i < vf->vsi.num_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

	ixl_flush(hw);
}
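
/*
 * Two mappings are programmed above: VPLAN_QTABLE translates VF-relative
 * queue indexes into the PF's global queue space, while VSILAN_QTABLE ties
 * the same queues to the VF's VSI. The two match because each VF is given
 * exactly one VSI.
 */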

static void
ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
{
	struct i40e_hw *hw;

	hw = &pf->hw;

	if (vsi->seid == 0)
		return;

	i40e_aq_delete_element(hw, vsi->seid, NULL);
}

static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}

static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}

static void
ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfint_reg, vpint_reg;
	int i;

	hw = &pf->hw;

	ixl_vf_vsi_release(pf, &vf->vsi);

	/* Index 0 has a special register. */
	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
		ixl_vf_disable_queue_intr(hw, vfint_reg);
	}

	/* Index 0 has a special register. */
	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
		ixl_vf_unregister_intr(hw, vpint_reg);
	}

	vf->vsi.num_queues = 0;
}

static int
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int i;
	uint16_t global_vf_num;
	uint32_t ciad;

	hw = &pf->hw;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;

	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
	    (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		ciad = rd32(hw, I40E_PF_PCI_CIAD);
		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
			return (0);
		else
			i40e_usec_delay(1);
	}

	return (ETIMEDOUT);
}

static void
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrtrig;

	hw = &pf->hw;

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
	ixl_flush(hw);

	ixl_reinit_vf(pf, vf);
}

static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		i40e_usec_delay(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(&vf->vsi);

	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
	ixl_flush(hw);
}

static int
ixl_vc_opcode_level(uint16_t opcode)
{
	switch (opcode) {
	case VIRTCHNL_OP_GET_STATS:
		return (10);
	default:
		return (5);
	}
}

static void
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, void *msg, uint16_t len)
{
	struct i40e_hw *hw;
	int global_vf_id;

	hw = &pf->hw;
	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num);

	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
}

static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}

static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
	    status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}

static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_version_info reply;

	if (msg_size != sizeof(struct virtchnl_version_info)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
		    I40E_ERR_PARAM);
		return;
	}

	vf->version = ((struct virtchnl_version_info *)msg)->minor;

	reply.major = VIRTCHNL_VERSION_MAJOR;
	reply.minor = VIRTCHNL_VERSION_MINOR;
	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
	    sizeof(reply));
}

static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{

	if (msg_size != 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_reset_vf(pf, vf);

	/* No response to a reset message. */
}

static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vf_resource reply;

	if ((vf->version == 0 && msg_size != 0) ||
	    (vf->version == 1 && msg_size != 4)) {
		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
		    " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
		    vf->version);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
		    I40E_ERR_PARAM);
		return;
	}

	bzero(&reply, sizeof(reply));

	if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_REG |
		    VIRTCHNL_VF_OFFLOAD_VLAN;
	else
		/* Force VF RSS setup by PF in 1.1+ VFs */
		reply.vf_cap_flags = *(u32 *)msg & (
		    VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_PF |
		    VIRTCHNL_VF_OFFLOAD_VLAN);

	reply.num_vsis = 1;
	reply.num_queue_pairs = vf->vsi.num_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	reply.rss_key_size = 52;
	reply.rss_lut_size = 64;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
}

static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);

	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);

	return (0);
}

static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	bzero(&rxq, sizeof(rxq));

	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
	    vf->vf_num, global_queue_num, info->queue_id);

	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		rxq.dtype = 2;
	}

	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);

	return (0);
}

static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vsi_queue_config_info *info;
	struct virtchnl_queue_pair_info *pair;
	uint16_t expected_msg_size;
	int i;

	if (msg_size < sizeof(*info)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	info = msg;
	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
	if (msg_size != expected_msg_size) {
		device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
		    vf->vf_num, msg_size, expected_msg_size);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	if (info->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < info->num_queue_pairs; i++) {
		pair = &info->qpair[i];

		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
		    pair->txq.queue_id != pair->rxq.queue_id ||
		    pair->txq.queue_id >= vf->vsi.num_queues) {

			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}

static void
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
{
	uint32_t offset, qctl;
	uint16_t itr_indx;

	if (cur_type == I40E_QUEUE_TYPE_RX) {
		offset = I40E_QINT_RQCTL(cur_queue);
		itr_indx = vector->rxitr_idx;
	} else {
		offset = I40E_QINT_TQCTL(cur_queue);
		itr_indx = vector->txitr_idx;
	}

	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

	wr32(&pf->hw, offset, qctl);

	*last_type = cur_type;
	*last_queue = cur_queue;
}
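
/*
 * Each QINT_[RT]QCTL write above stores the *previously* programmed queue
 * in the NEXTQ fields of the current queue's register, so the interrupt
 * linked list is threaded back-to-front; the caller finishes the list by
 * pointing VPINT_LNKLST at the last queue programmed.
 */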

static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct virtchnl_vector_map *vector)
{
	struct i40e_hw *hw;
	int qindex;
	enum i40e_queue_type type, last_type;
	uint32_t lnklst_reg;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	hw = &pf->hw;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says to optimize performance, RX queues and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		if (txq_map != 0) {
			qindex = ffs(txq_map) - 1;
			type = I40E_QUEUE_TYPE_TX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			txq_map &= ~(1 << qindex);
		}

		if (rxq_map != 0) {
			qindex = ffs(rxq_map) - 1;
			type = I40E_QUEUE_TYPE_RX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			rxq_map &= ~(1 << qindex);
		}
	}

	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
	else
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
		    vf->vf_num);
	wr32(hw, lnklst_reg,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

	ixl_flush(hw);
}
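
/*
 * Example of the interleaving above, for a vector with rxq_map == 0x3 and
 * txq_map == 0x3: queues are programmed in the order tx0, rx0, tx1, rx1,
 * each entry pointing at the previous head, so the hardware walks
 * rx1 -> tx1 -> rx0 -> tx0 -> end-of-list starting from VPINT_LNKLST.
 */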

static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_irq_map_info *map;
	struct virtchnl_vector_map *vector;
	struct i40e_hw *hw;
	int i, largest_txq, largest_rxq;

	hw = &pf->hw;

	if (msg_size < sizeof(*map)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	map = msg;
	if (map->num_vectors == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < map->num_vectors; i++) {
		vector = &map->vecmap[i];

		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
		    vector->vsi_id != vf->vsi.vsi_num) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
			return;
		}

		if (vector->rxq_map != 0) {
			largest_rxq = fls(vector->rxq_map) - 1;
			if (largest_rxq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->txq_map != 0) {
			largest_txq = fls(vector->txq_map) - 1;
			if (largest_txq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP,
			    I40E_ERR_PARAM);
			return;
		}

		ixl_vf_config_vector(pf, vf, vector);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
}

static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	if (msg_size != sizeof(*select)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	select = msg;
	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Enable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
				device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
				    vf->vf_num, i);

			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
		}
	}

	/* Enable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
				device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
				    vf->vf_num, i);
			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
}
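
/*
 * The tx_queues/rx_queues fields in virtchnl_queue_select are 32-bit
 * bitmaps in VF-relative queue space, hence the fixed 0..31 scans above;
 * a set bit at or beyond the VF's queue allocation draws a warning and
 * stops the scan.
 */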

static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	if (msg_size != sizeof(*select)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	select = msg;
	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Disable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
				device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
		}
	}

	/* Disable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
				device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}

static bool
ixl_zero_mac(const uint8_t *addr)
{
	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	return (cmp_etheraddr(addr, zero));
}

static bool
ixl_bcast_mac(const uint8_t *addr)
{

	return (cmp_etheraddr(addr, ixl_bcast_addr));
}

static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{

	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
		return (EINVAL);

	/*
	 * If the VF is not allowed to change its MAC address, don't let it
	 * set a MAC filter for an address that is not a multicast address and
	 * is not its assigned MAC.
	 */
	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
		return (EPERM);

	return (0);
}
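
/*
 * Example of the policy above: a VF created without "allow-set-mac" may
 * still install filters for its assigned MAC and for multicast addresses,
 * but gets EPERM for any other unicast address.
 */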

static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;
	int i;
	size_t expected_size;

	vsi = &vf->vsi;

	if (msg_size < sizeof(*addr_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	addr_list = msg;
	expected_size = sizeof(*addr_list) +
	    addr_list->num_elements * sizeof(*addr);

	if (addr_list->num_elements == 0 ||
	    addr_list->vsi_id != vsi->vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
}

static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	int i;
	size_t expected_size;

	if (msg_size < sizeof(*addr_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	addr_list = msg;
	expected_size = sizeof(*addr_list) +
	    addr_list->num_elements * sizeof(*addr);

	if (addr_list->num_elements == 0 ||
	    addr_list->vsi_id != vf->vsi.vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
}

static enum i40e_status_code
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_vsi_context vsi_ctx;

	vsi_ctx.seid = vf->vsi.seid;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
}
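
/*
 * VLAN stripping is enabled as a side effect of the first successful
 * ADD_VLAN request: port_vlan_flags moves from EMOD_NOTHING (set when the
 * VSI is created) to EMOD_STR_BOTH here.
 */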

static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vlan_filter_list *filter_list;
	enum i40e_status_code code;
	size_t expected_size;
	int i;

	if (msg_size < sizeof(*filter_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	filter_list = msg;
	expected_size = sizeof(*filter_list) +
	    filter_list->num_elements * sizeof(uint16_t);
	if (filter_list->num_elements == 0 ||
	    filter_list->vsi_id != vf->vsi.vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
			    I40E_ERR_PARAM);
			return;
		}
	}

	code = ixl_vf_enable_vlan_strip(pf, vf);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
}

static void
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vlan_filter_list *filter_list;
	size_t expected_size;
	int i;

	if (msg_size < sizeof(*filter_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	filter_list = msg;
	expected_size = sizeof(*filter_list) +
	    filter_list->num_elements * sizeof(uint16_t);
	if (filter_list->num_elements == 0 ||
	    filter_list->vsi_id != vf->vsi.vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
			    I40E_ERR_PARAM);
			return;
		}
	}

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
}

static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_promisc_info *info;
	enum i40e_status_code code;

	if (msg_size != sizeof(*info)) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	info = msg;
	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
		return;
	}

	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}

static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *queue;

	if (msg_size != sizeof(*queue)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	queue = msg;
	if (queue->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_update_eth_stats(&vf->vsi);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}

static void
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_key *key;
	struct i40e_aqc_get_set_rss_key_data key_data;
	enum i40e_status_code status;

	hw = &pf->hw;

	if (msg_size < sizeof(*key)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	key = msg;

	if (key->key_len > 52) {
		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
		    vf->vf_num, key->key_len, 52);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	if (key->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out hash using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		bzero(&key_data, sizeof(key_data));
		if (key->key_len <= 40)
			bcopy(key->key, key_data.standard_rss_key, key->key_len);
		else {
			bcopy(key->key, key_data.standard_rss_key, 40);
			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
		}
		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (key->key_len / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
	    vf->vf_num, key->key[0]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}
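
/*
 * The 52-byte limit above matches the X722 admin-queue key layout: a
 * 40-byte standard RSS key plus a 12-byte extended hash key. On other
 * MACs the key is written directly as 13 32-bit VFQF_HKEY1 registers
 * (52 / 4).
 */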

static void
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_lut *lut;
	enum i40e_status_code status;

	hw = &pf->hw;

	if (msg_size < sizeof(*lut)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	lut = msg;

	if (lut->lut_entries > 64) {
		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
		    vf->vf_num, lut->lut_entries, 64);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	if (lut->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out LUT using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (lut->lut_entries / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
	    vf->vf_num, lut->lut[0], lut->lut_entries);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}
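
/*
 * A VF RSS LUT holds at most 64 single-byte entries, so the register
 * fallback above packs them four per 32-bit VFQF_HLUT1 write (16 writes
 * for a full table).
 */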

static void
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_hena *hena;

	hw = &pf->hw;

	if (msg_size < sizeof(*hena)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
		    I40E_ERR_PARAM);
		return;
	}

	hena = msg;

	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));

	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
	    vf->vf_num, hena->hena);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
}

void
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct virtchnl_pf_event event;
	struct i40e_hw *hw;

	hw = &pf->hw;
	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.severity = PF_EVENT_SEVERITY_INFO;
	event.event_data.link_event.link_status = pf->vsi.link_active;
	event.event_data.link_event.link_speed =
	    (enum virtchnl_link_speed)hw->phy.link_info.link_speed;

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
	    sizeof(event));
}

void
ixl_broadcast_link_state(struct ixl_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_vfs; i++)
		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}

void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
	struct ixl_vf *vf;
	void *msg;
	uint16_t vf_num, msg_size;
	uint32_t opcode;

	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
		return;
	}

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from%sVF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode,
	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
	    vf_num, msg_size);

	/* This must be a stray msg from a previously destroyed VF. */
	if (!(vf->vf_flags & VF_FLAG_ENABLED))
		return;

	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
		break;

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
	default:
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
		break;
	}
}

/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
void
ixl_handle_vflr(void *arg, int pending)
{
	struct ixl_pf *pf;
	struct ixl_vf *vf;
	struct i40e_hw *hw;
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
	int i;

	pf = arg;
	hw = &pf->hw;

	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		vf = &pf->vfs[i];
		if (!(vf->vf_flags & VF_FLAG_ENABLED))
			continue;

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
			    vflrstat_mask);

			ixl_reinit_vf(pf, vf);
		}
	}

	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
	ixl_flush(hw);
}
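
/*
 * The IXL_GLGEN_VFLRSTAT_INDEX/MASK macros used above locate a VF's bit in
 * the GLGEN_VFLRSTAT register array; this sketch of the layout assumes the
 * usual 32-VFs-per-32-bit-register arrangement (index = global_vf_num / 32,
 * mask = 1 << (global_vf_num % 32)).
 */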

static int
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
{
	switch (err) {
	case I40E_AQ_RC_EPERM:
		return (EPERM);
	case I40E_AQ_RC_ENOENT:
		return (ENOENT);
	case I40E_AQ_RC_ESRCH:
		return (ESRCH);
	case I40E_AQ_RC_EINTR:
		return (EINTR);
	case I40E_AQ_RC_EIO:
		return (EIO);
	case I40E_AQ_RC_ENXIO:
		return (ENXIO);
	case I40E_AQ_RC_E2BIG:
		return (E2BIG);
	case I40E_AQ_RC_EAGAIN:
		return (EAGAIN);
	case I40E_AQ_RC_ENOMEM:
		return (ENOMEM);
	case I40E_AQ_RC_EACCES:
		return (EACCES);
	case I40E_AQ_RC_EFAULT:
		return (EFAULT);
	case I40E_AQ_RC_EBUSY:
		return (EBUSY);
	case I40E_AQ_RC_EEXIST:
		return (EEXIST);
	case I40E_AQ_RC_EINVAL:
		return (EINVAL);
	case I40E_AQ_RC_ENOTTY:
		return (ENOTTY);
	case I40E_AQ_RC_ENOSPC:
		return (ENOSPC);
	case I40E_AQ_RC_ENOSYS:
		return (ENOSYS);
	case I40E_AQ_RC_ERANGE:
		return (ERANGE);
	case I40E_AQ_RC_EFLUSHED:
		return (EINVAL);	/* No exact equivalent in errno.h */
	case I40E_AQ_RC_BAD_ADDR:
		return (EFAULT);
	case I40E_AQ_RC_EMODE:
		return (EPERM);
	case I40E_AQ_RC_EFBIG:
		return (EFBIG);
	default:
		return (EINVAL);
	}
}

int
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *pf_vsi;
	enum i40e_status_code ret;
	int error;

	pf = device_get_softc(dev);
	hw = &pf->hw;
	pf_vsi = &pf->vsi;

	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
	    M_ZERO);
	if (pf->vfs == NULL) {
		error = ENOMEM;
		goto fail;
	}

	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
	    1, FALSE, &pf->veb_seid, FALSE, NULL);
	if (ret != I40E_SUCCESS) {
		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
		    error);
		goto fail;
	}

	/*
	 * Adding a VEB brings back the default MAC filter(s). Remove them,
	 * and let the driver add the proper filters back.
	 */
	ixl_del_default_hw_filters(pf_vsi);
	ixl_reconfigure_filters(pf_vsi);

	pf->num_vfs = num_vfs;
	return (0);

fail:
	free(pf->vfs, M_IXL);
	pf->vfs = NULL;
	return (error);
}

void
ixl_iov_uninit(device_t dev)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct ixl_vf *vfs;
	int i, num_vfs;

	pf = device_get_softc(dev);
	hw = &pf->hw;
	vsi = &pf->vsi;

	for (i = 0; i < pf->num_vfs; i++) {
		if (pf->vfs[i].vsi.seid != 0)
			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
		ixl_free_mac_filters(&pf->vfs[i].vsi);
		DDPRINTF(dev, "VF %d: %d released\n",
		    i, pf->vfs[i].qtag.num_allocated);
		DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
	}

	if (pf->veb_seid != 0) {
		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
		pf->veb_seid = 0;
	}

	vfs = pf->vfs;
	num_vfs = pf->num_vfs;

	pf->vfs = NULL;
	pf->num_vfs = 0;

	/* Do this after the unlock as sysctl_ctx_free might sleep. */
	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
	free(vfs, M_IXL);
}

static int
ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
{
	device_t dev = pf->dev;
	int error;

	/* Validate, and clamp value if invalid */
	if (num_queues < 1 || num_queues > 16)
		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
		    num_queues, vf->vf_num);
	if (num_queues < 1) {
		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
		num_queues = 1;
	} else if (num_queues > 16) {
		device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
		num_queues = 16;
	}
	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
	if (error) {
		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
		    num_queues, vf->vf_num);
		return (ENOSPC);
	}

	DDPRINTF(dev, "VF %d: %d allocated, %d active",
	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
	DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));

	return (0);
}

int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	char sysctl_name[IXL_QUEUE_NAME_LEN];
	struct ixl_pf *pf;
	struct ixl_vf *vf;
	const void *mac;
	size_t size;
	int error;
	int vf_num_queues;

	pf = device_get_softc(dev);
	vf = &pf->vfs[vfnum];

	vf->vf_num = vfnum;
	vf->vsi.back = pf;
	vf->vf_flags = VF_FLAG_ENABLED;

	/* Reserve queue allocation from PF */
	vf_num_queues = nvlist_get_number(params, "num-queues");
	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
	if (error != 0)
		goto out;

	error = ixl_vf_setup_vsi(pf, vf);
	if (error != 0)
		goto out;

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf->mac, ETHER_ADDR_LEN);

		if (nvlist_get_bool(params, "allow-set-mac"))
			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;

	if (nvlist_get_bool(params, "mac-anti-spoof"))
		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

	if (nvlist_get_bool(params, "allow-promisc"))
		vf->vf_flags |= VF_FLAG_PROMISC_CAP;

	vf->vf_flags |= VF_FLAG_VLAN_CAP;

	ixl_reset_vf(pf, vf);
out:
	if (error == 0) {
		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
		ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);
	}

	return (error);
}
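
/*
 * A minimal iovctl.conf(5) sketch exercising the schema defined in
 * ixl_initialize_sriov() above (device name and values are illustrative):
 *
 *	PF {
 *		device : "ixl0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		num-queues : 4;
 *		mac-addr : "02:01:02:03:04:05";
 *		allow-set-mac : true;
 *		allow-promisc : false;
 *	}
 */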