/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "ixl_pf_iov.h"
/* Private functions */
static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
static void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);

static bool	ixl_zero_mac(const uint8_t *addr);
static bool	ixl_bcast_mac(const uint8_t *addr);

static int	ixl_vc_opcode_level(uint16_t opcode);

static int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);

static int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
static void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
static int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
static void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
static void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
static void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
static int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue);
static void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);

static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
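/*
 * Register the SR-IOV parameter schemas with the PCI IOV framework so VFs
 * can be created via iovctl(8).  The per-VF knobs declared below
 * ("mac-addr", "mac-anti-spoof", "allow-set-mac", "allow-promisc",
 * "num-queues") are consumed later in ixl_add_vf().
 *
 * Illustrative iovctl.conf(5) stanza; the device name "ixl0" and the
 * specific values are assumptions, not taken from this file:
 *
 *	PF {
 *		device : "ixl0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		allow-set-mac : true;
 *		num-queues : 4;
 *	}
 */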
void
ixl_initialize_sriov(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	nvlist_t *pf_schema, *vf_schema;
	int iov_error;

	/* SR-IOV is only supported when MSI-X is in use. */
	if (pf->msix <= 1)
		return;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
	    IOV_SCHEMA_HASDEFAULT,
	    max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (iov_error != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    iov_error);
	} else
		device_printf(dev, "SR-IOV ready\n");

	pf->vc_debug_lvl = 1;
}
/*
 * Allocate the VSI for a VF.
 */
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	device_t dev;
	struct i40e_hw *hw;
	struct i40e_vsi_context vsi_ctx;
	int i;
	enum i40e_status_code code;

	hw = &pf->hw;
	dev = pf->dev;

	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	vsi_ctx.info.switch_id = htole16(0);

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);

	/* ERJ: Only scattered allocation is supported for VFs right now */
	for (i = 0; i < vf->qtag.num_active; i++)
		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	// vf->vsi.first_queue = vf->qtag.qidx[0];
	vf->vsi.num_queues = vf->qtag.num_active;

	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	}

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
	return (0);
}
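/*
 * Create the VF's VSI and seed its filter table with the broadcast filter;
 * any filters carried over from a previous incarnation are reprogrammed.
 */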
static int
ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	int error;

	error = ixl_vf_alloc_vsi(pf, vf);
	if (error != 0)
		return (error);

	vf->vsi.hw_filters_add = 0;
	vf->vsi.hw_filters_del = 0;
	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
	ixl_reconfigure_filters(&vf->vsi);

	return (error);
}
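/*
 * Write one entry of the VSI's queue-index table.  Each VSILAN_QTABLE
 * register holds two queue indexes; the computed shift selects which half
 * of the register is updated.
 */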
static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
    uint32_t val)
{
	uint32_t qtable;
	int index, shift;

	/*
	 * Two queues are mapped in a single register, so we have to do some
	 * gymnastics to convert the queue number into a register index and
	 * shift.
	 */
	index = qnum / 2;
	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;

	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
	qtable |= val << shift;
	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}
static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int i;
	uint32_t qtable;

	hw = &pf->hw;

	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	    I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable LAN traffic on this VF */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Program index of each VF queue into PF queue space
	 * (This is only needed if QTABLE is enabled) */
	for (i = 0; i < vf->vsi.num_queues; i++) {
		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	}
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
		    I40E_VPLAN_QTABLE_QINDEX_MASK);

	/* Map queues allocated to VF to its VSI;
	 * This mapping matches the VF-wide mapping since the VF
	 * is only given a single VSI */
	for (i = 0; i < vf->vsi.num_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

	ixl_flush(hw);
}
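/* Delete the VF's VSI element from the switch, if one was ever created. */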
static void
ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = &pf->hw;

	if (vsi->seid == 0)
		return;

	i40e_aq_delete_element(hw, vsi->seid, NULL);
}
static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}

static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}
static void
ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfint_reg, vpint_reg;
	int i;

	hw = &pf->hw;

	ixl_vf_vsi_release(pf, &vf->vsi);

	/* Index 0 has a special register. */
	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
		ixl_vf_disable_queue_intr(hw, vfint_reg);
	}

	/* Index 0 has a special register. */
	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
		ixl_vf_unregister_intr(hw, vpint_reg);
	}

	vf->vsi.num_queues = 0;
}
static int
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int i;
	uint16_t global_vf_num;
	uint32_t ciad;

	hw = &pf->hw;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;

	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
	    (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		ciad = rd32(hw, I40E_PF_PCI_CIAD);
		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
			return (0);
		else
			i40e_usec_delay(1);
	}

	return (ETIMEDOUT);
}
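/*
 * Assert the software reset trigger for the VF and then run the full
 * reinitialization sequence; this is the PF-side implementation of a VF
 * reset.
 */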
static void
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrtrig;

	hw = &pf->hw;

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
	ixl_flush(hw);

	ixl_reinit_vf(pf, vf);
}
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		i40e_msec_delay(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(&vf->vsi);

	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
	ixl_flush(hw);
}
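/*
 * GET_STATS requests arrive continuously from each active VF, so they are
 * logged at a much higher (quieter) debug level than other opcodes.
 */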
static int
ixl_vc_opcode_level(uint16_t opcode)
{
	switch (opcode) {
	case VIRTCHNL_OP_GET_STATS:
		return (10);
	default:
		return (5);
	}
}
static void
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, void *msg, uint16_t len)
{
	struct i40e_hw *hw;
	int global_vf_id;

	hw = &pf->hw;
	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num);

	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
}
static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}

static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
	    status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}
static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_version_info reply;

	if (msg_size != sizeof(struct virtchnl_version_info)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
		    I40E_ERR_PARAM);
		return;
	}

	vf->version = ((struct virtchnl_version_info *)msg)->minor;

	reply.major = VIRTCHNL_VERSION_MAJOR;
	reply.minor = VIRTCHNL_VERSION_MINOR;
	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
	    sizeof(reply));
}
static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{

	if (msg_size != 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_reset_vf(pf, vf);

	/* No response to a reset message. */
}
static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vf_resource reply;

	if ((vf->version == 0 && msg_size != 0) ||
	    (vf->version == 1 && msg_size != 4)) {
		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
		    " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
		    vf->version);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
		    I40E_ERR_PARAM);
		return;
	}

	bzero(&reply, sizeof(reply));

	if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_REG |
		    VIRTCHNL_VF_OFFLOAD_VLAN;
	else
		/* Force VF RSS setup by PF in 1.1+ VFs */
		reply.vf_cap_flags = *(u32 *)msg & (
		    VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_PF |
		    VIRTCHNL_VF_OFFLOAD_VLAN);

	reply.num_vsis = 1;
	reply.num_queue_pairs = vf->vsi.num_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	reply.rss_key_size = 52;
	reply.rss_lut_size = 64;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
}
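/*
 * Program the HMC TX queue context for one VF queue: translate the VF's
 * VSI-relative queue id into a PF (global) queue number, write the ring
 * base/length and head-writeback settings, then associate the queue with
 * the VF in QTX_CTL.
 */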
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);

	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);

	return (0);
}
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	bzero(&rxq, sizeof(rxq));

	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
	    vf->vf_num, global_queue_num, info->queue_id);

	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		rxq.dtype = 2;
	}

	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.lrxqthresh = 2;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);

	return (0);
}
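/*
 * Handler for VIRTCHNL_OP_CONFIG_VSI_QUEUES: validates the message size,
 * queue-pair count, and VSI id before programming each TX/RX queue pair.
 */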
static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vsi_queue_config_info *info;
	struct virtchnl_queue_pair_info *pair;
	uint16_t expected_msg_size;
	int i;

	if (msg_size < sizeof(*info)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	info = msg;
	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
	if (msg_size != expected_msg_size) {
		device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
		    vf->vf_num, msg_size, expected_msg_size);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	if (info->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < info->num_queue_pairs; i++) {
		pair = &info->qpair[i];

		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
		    pair->txq.queue_id != pair->rxq.queue_id ||
		    pair->txq.queue_id >= vf->vsi.num_queues) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
{
	uint32_t offset, qctl;
	uint16_t itr_indx;

	if (cur_type == I40E_QUEUE_TYPE_RX) {
		offset = I40E_QINT_RQCTL(cur_queue);
		itr_indx = vector->rxitr_idx;
	} else {
		offset = I40E_QINT_TQCTL(cur_queue);
		itr_indx = vector->txitr_idx;
	}

	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

	wr32(&pf->hw, offset, qctl);

	*last_type = cur_type;
	*last_queue = cur_queue;
}
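/*
 * Build the interrupt linked list for one MSI-X vector.  Queues are chained
 * back-to-front: each QINT_[RT]QCTL entry points at the previously written
 * queue, and the VPINT_LNKLST register is left pointing at the head.
 */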
static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct virtchnl_vector_map *vector)
{
	struct i40e_hw *hw;
	u_int qindex;
	enum i40e_queue_type type, last_type;
	uint32_t lnklst_reg;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	hw = &pf->hw;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says to optimize performance, RX queues and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		if (txq_map != 0) {
			qindex = ffs(txq_map) - 1;
			type = I40E_QUEUE_TYPE_TX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			txq_map &= ~(1 << qindex);
		}

		if (rxq_map != 0) {
			qindex = ffs(rxq_map) - 1;
			type = I40E_QUEUE_TYPE_RX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			rxq_map &= ~(1 << qindex);
		}
	}

	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
	else
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
		    vf->vf_num);
	wr32(hw, lnklst_reg,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

	ixl_flush(hw);
}
static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_irq_map_info *map;
	struct virtchnl_vector_map *vector;
	struct i40e_hw *hw;
	int i, largest_txq, largest_rxq;

	hw = &pf->hw;

	if (msg_size < sizeof(*map)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	map = msg;
	if (map->num_vectors == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < map->num_vectors; i++) {
		vector = &map->vecmap[i];

		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
		    vector->vsi_id != vf->vsi.vsi_num) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
			return;
		}

		if (vector->rxq_map != 0) {
			largest_rxq = fls(vector->rxq_map) - 1;
			if (largest_rxq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->txq_map != 0) {
			largest_txq = fls(vector->txq_map) - 1;
			if (largest_txq >= vf->vsi.num_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP,
			    I40E_ERR_PARAM);
			return;
		}

		ixl_vf_config_vector(pf, vf, vector);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
}
static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	if (msg_size != sizeof(*select)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	select = msg;
	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Enable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
				device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
				    vf->vf_num, i);

			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
		}
	}

	/* Enable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
				device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
				    vf->vf_num, i);
			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
}
static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	if (msg_size != sizeof(*select)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	select = msg;
	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Disable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
				device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
		}
	}

	/* Disable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
				device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}
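/* Helpers for validating MAC addresses supplied by a VF. */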
static bool
ixl_zero_mac(const uint8_t *addr)
{
	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	return (cmp_etheraddr(addr, zero));
}

static bool
ixl_bcast_mac(const uint8_t *addr)
{

	return (cmp_etheraddr(addr, ixl_bcast_addr));
}
static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{

	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
		return (EINVAL);

	/*
	 * If the VF is not allowed to change its MAC address, don't let it
	 * set a MAC filter for an address that is not a multicast address and
	 * is not its assigned MAC.
	 */
	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
		return (EPERM);

	return (0);
}
static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;
	int i;
	size_t expected_size;

	vsi = &vf->vsi;

	if (msg_size < sizeof(*addr_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	addr_list = msg;
	expected_size = sizeof(*addr_list) +
	    addr_list->num_elements * sizeof(*addr);

	if (addr_list->num_elements == 0 ||
	    addr_list->vsi_id != vsi->vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
}
static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	size_t expected_size;
	int i;

	if (msg_size < sizeof(*addr_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	addr_list = msg;
	expected_size = sizeof(*addr_list) +
	    addr_list->num_elements * sizeof(*addr);

	if (addr_list->num_elements == 0 ||
	    addr_list->vsi_id != vf->vsi.vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
}
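/*
 * Turn on VLAN stripping for the VF's VSI via an update-VSI-parameters
 * admin queue command; done before the first VLAN filter is installed.
 */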
static enum i40e_status_code
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_vsi_context vsi_ctx;

	vsi_ctx.seid = vf->vsi.seid;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
}
static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vlan_filter_list *filter_list;
	enum i40e_status_code code;
	size_t expected_size;
	int i;

	if (msg_size < sizeof(*filter_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	filter_list = msg;
	expected_size = sizeof(*filter_list) +
	    filter_list->num_elements * sizeof(uint16_t);
	if (filter_list->num_elements == 0 ||
	    filter_list->vsi_id != vf->vsi.vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
			    I40E_ERR_PARAM);
			return;
		}
	}

	code = ixl_vf_enable_vlan_strip(pf, vf);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
}
static void
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vlan_filter_list *filter_list;
	size_t expected_size;
	int i;

	if (msg_size < sizeof(*filter_list)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	filter_list = msg;
	expected_size = sizeof(*filter_list) +
	    filter_list->num_elements * sizeof(uint16_t);
	if (filter_list->num_elements == 0 ||
	    filter_list->vsi_id != vf->vsi.vsi_num ||
	    msg_size != expected_size) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
			    I40E_ERR_PARAM);
			return;
		}
	}

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
}
static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_promisc_info *info;
	enum i40e_status_code code;

	if (msg_size != sizeof(*info)) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	info = msg;
	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
		return;
	}

	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}
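/*
 * Handler for VIRTCHNL_OP_GET_STATS: refresh the VSI's ethernet statistics
 * and return them to the VF in the reply payload.
 */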
static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *queue;

	if (msg_size != sizeof(*queue)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	queue = msg;
	if (queue->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_update_eth_stats(&vf->vsi);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}
static void
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_key *key;
	struct i40e_aqc_get_set_rss_key_data key_data;
	enum i40e_status_code status;

	hw = &pf->hw;

	if (msg_size < sizeof(*key)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	key = msg;

	if (key->key_len > 52) {
		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
		    vf->vf_num, key->key_len, 52);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	if (key->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out hash using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		bzero(&key_data, sizeof(key_data));
		if (key->key_len <= 40)
			bcopy(key->key, key_data.standard_rss_key, key->key_len);
		else {
			bcopy(key->key, key_data.standard_rss_key, 40);
			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
		}
		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (key->key_len / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
	    vf->vf_num, key->key[0]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}
static void
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_lut *lut;
	enum i40e_status_code status;

	hw = &pf->hw;

	if (msg_size < sizeof(*lut)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	lut = msg;

	if (lut->lut_entries > 64) {
		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
		    vf->vf_num, lut->lut_entries, 64);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	if (lut->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out LUT using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (lut->lut_entries / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
	    vf->vf_num, lut->lut[0], lut->lut_entries);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}
static void
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_hena *hena;

	hw = &pf->hw;

	if (msg_size < sizeof(*hena)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
		    I40E_ERR_PARAM);
		return;
	}

	hena = msg;

	/* Set HENA */
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));

	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
	    vf->vf_num, hena->hena);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
}
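/*
 * Send a VIRTCHNL_EVENT_LINK_CHANGE event reflecting the PF's current link
 * state and speed to a single VF.
 */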
static void
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct virtchnl_pf_event event;
	struct i40e_hw *hw;

	hw = &pf->hw;
	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.severity = PF_EVENT_SEVERITY_INFO;
	event.event_data.link_event.link_status = pf->vsi.link_active;
	event.event_data.link_event.link_speed =
	    (enum virtchnl_link_speed)hw->phy.link_info.link_speed;

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
	    sizeof(event));
}
void
ixl_broadcast_link_state(struct ixl_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_vfs; i++)
		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
	struct ixl_vf *vf;
	void *msg;
	uint16_t vf_num, msg_size;
	uint32_t opcode;

	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
		return;
	}

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from%sVF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode,
	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
	    vf_num, msg_size);

	/* This must be a stray msg from a previously destroyed VF. */
	if (!(vf->vf_flags & VF_FLAG_ENABLED))
		return;

	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
		break;

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
	default:
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
		break;
	}
}
/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
void
ixl_handle_vflr(void *arg, int pending)
{
	struct ixl_pf *pf;
	struct ixl_vf *vf;
	struct i40e_hw *hw;
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
	int i;

	pf = arg;
	hw = &pf->hw;

	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		vf = &pf->vfs[i];
		if (!(vf->vf_flags & VF_FLAG_ENABLED))
			continue;

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
			    vflrstat_mask);

			ixl_reinit_vf(pf, vf);
		}
	}

	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
	ixl_flush(hw);
}
static int
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
{
	switch (err) {
	case I40E_AQ_RC_EPERM:
		return (EPERM);
	case I40E_AQ_RC_ENOENT:
		return (ENOENT);
	case I40E_AQ_RC_ESRCH:
		return (ESRCH);
	case I40E_AQ_RC_EINTR:
		return (EINTR);
	case I40E_AQ_RC_EIO:
		return (EIO);
	case I40E_AQ_RC_ENXIO:
		return (ENXIO);
	case I40E_AQ_RC_E2BIG:
		return (E2BIG);
	case I40E_AQ_RC_EAGAIN:
		return (EAGAIN);
	case I40E_AQ_RC_ENOMEM:
		return (ENOMEM);
	case I40E_AQ_RC_EACCES:
		return (EACCES);
	case I40E_AQ_RC_EFAULT:
		return (EFAULT);
	case I40E_AQ_RC_EBUSY:
		return (EBUSY);
	case I40E_AQ_RC_EEXIST:
		return (EEXIST);
	case I40E_AQ_RC_EINVAL:
		return (EINVAL);
	case I40E_AQ_RC_ENOTTY:
		return (ENOTTY);
	case I40E_AQ_RC_ENOSPC:
		return (ENOSPC);
	case I40E_AQ_RC_ENOSYS:
		return (ENOSYS);
	case I40E_AQ_RC_ERANGE:
		return (ERANGE);
	case I40E_AQ_RC_EFLUSHED:
		return (EINVAL);	/* No exact equivalent in errno.h */
	case I40E_AQ_RC_BAD_ADDR:
		return (EFAULT);
	case I40E_AQ_RC_EMODE:
		return (EPERM);
	case I40E_AQ_RC_EFBIG:
		return (EFBIG);
	default:
		return (EINVAL);
	}
}
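/*
 * PCI IOV entry point: allocate per-VF state and create the VEB that will
 * connect the VF VSIs to the PF's uplink.
 */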
int
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *pf_vsi;
	enum i40e_status_code ret;
	int i, error;

	pf = device_get_softc(dev);
	hw = &pf->hw;
	pf_vsi = &pf->vsi;

	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
	    M_ZERO);
	if (pf->vfs == NULL) {
		error = ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_init(&pf->vfs[i].ctx);

	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
	    1, FALSE, &pf->veb_seid, FALSE, NULL);
	if (ret != I40E_SUCCESS) {
		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
		    error);
		goto fail;
	}

	pf->num_vfs = num_vfs;
	return (0);

fail:
	free(pf->vfs, M_IXL);
	pf->vfs = NULL;
	return (error);
}
void
ixl_iov_uninit(device_t dev)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct ixl_vf *vfs;
	int i, num_vfs;

	pf = device_get_softc(dev);
	hw = &pf->hw;
	vsi = &pf->vsi;

	for (i = 0; i < pf->num_vfs; i++) {
		if (pf->vfs[i].vsi.seid != 0)
			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
		ixl_free_mac_filters(&pf->vfs[i].vsi);
		DDPRINTF(dev, "VF %d: %d released\n",
		    i, pf->vfs[i].qtag.num_allocated);
		DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
	}

	if (pf->veb_seid != 0) {
		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
		pf->veb_seid = 0;
	}

	vfs = pf->vfs;
	num_vfs = pf->num_vfs;

	pf->vfs = NULL;
	pf->num_vfs = 0;

	/* Do this after the unlock as sysctl_ctx_free might sleep. */
	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_free(&vfs[i].ctx);

	free(vfs, M_IXL);
}
static int
ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
{
	device_t dev = pf->dev;
	int error;

	/* Validate, and clamp value if invalid */
	if (num_queues < 1 || num_queues > 16)
		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
		    num_queues, vf->vf_num);
	if (num_queues < 1) {
		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
		num_queues = 1;
	} else if (num_queues > 16) {
		device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
		num_queues = 16;
	}
	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
	if (error) {
		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
		    num_queues, vf->vf_num);
		return (ENOSPC);
	}

	DDPRINTF(dev, "VF %d: %d allocated, %d active",
	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
	DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));

	return (0);
}
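/*
 * PCI IOV entry point for creating one VF: reserve queues, build the VSI,
 * apply the nvlist parameters supplied via iovctl(8), and reset the VF into
 * a clean, active state.
 */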
int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	char sysctl_name[QUEUE_NAME_LEN];
	struct ixl_pf *pf;
	struct ixl_vf *vf;
	const void *mac;
	size_t size;
	int error;
	int vf_num_queues;

	pf = device_get_softc(dev);
	vf = &pf->vfs[vfnum];

	vf->vf_num = vfnum;
	vf->vsi.back = pf;
	vf->vf_flags = VF_FLAG_ENABLED;
	SLIST_INIT(&vf->vsi.ftl);

	/* Reserve queue allocation from PF */
	vf_num_queues = nvlist_get_number(params, "num-queues");
	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
	if (error != 0)
		goto out;

	error = ixl_vf_setup_vsi(pf, vf);
	if (error != 0)
		goto out;

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf->mac, ETHER_ADDR_LEN);

		if (nvlist_get_bool(params, "allow-set-mac"))
			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;

	if (nvlist_get_bool(params, "mac-anti-spoof"))
		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

	if (nvlist_get_bool(params, "allow-promisc"))
		vf->vf_flags |= VF_FLAG_PROMISC_CAP;

	vf->vf_flags |= VF_FLAG_VLAN_CAP;

	ixl_reset_vf(pf, vf);
out:
	if (error == 0) {
		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
	}

	return (error);
}