/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "ixl_pf_iov.h"
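
/*
 * PF-side SR-IOV support for ixl(4): VF VSI allocation, queue and
 * interrupt mapping, VF reset handling, and the PF half of the virtchnl
 * mailbox protocol used to service VF driver requests (dispatched from
 * ixl_handle_vf_msg() below).
 */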
/* Private functions */
static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);

static bool ixl_zero_mac(const uint8_t *addr);
static bool ixl_bcast_mac(const uint8_t *addr);

static int ixl_vc_opcode_level(uint16_t opcode);

static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);

static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue);
static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);

static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
void
ixl_initialize_sriov(struct ixl_pf *pf)
{
        device_t dev = pf->dev;
        struct i40e_hw *hw = &pf->hw;
        nvlist_t *pf_schema, *vf_schema;
        int iov_error;

        pf_schema = pci_iov_schema_alloc_node();
        vf_schema = pci_iov_schema_alloc_node();
        pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
        pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
            IOV_SCHEMA_HASDEFAULT, TRUE);
        pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
            IOV_SCHEMA_HASDEFAULT, FALSE);
        pci_iov_schema_add_bool(vf_schema, "allow-promisc",
            IOV_SCHEMA_HASDEFAULT, FALSE);
        pci_iov_schema_add_uint16(vf_schema, "num-queues",
            IOV_SCHEMA_HASDEFAULT,
            max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES)));
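
        /*
         * The default above reserves one VF MSI-X vector (vector 0, used by
         * the VF for its mailbox/admin queue) and clamps the remainder to
         * IXLV_MAX_QUEUES; out-of-range administrator requests are clamped
         * again in ixl_vf_reserve_queues().
         */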
        iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
        if (iov_error != 0) {
                device_printf(dev,
                    "Failed to initialize SR-IOV (error=%d)\n",
                    iov_error);
        } else
                device_printf(dev, "SR-IOV ready\n");

        pf->vc_debug_lvl = 1;
}

/*
 * Allocate the VSI for a VF.
 */
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
        device_t dev;
        struct i40e_hw *hw;
        struct i40e_vsi_context vsi_ctx;
        int i;
        enum i40e_status_code code;

        hw = &pf->hw;
        dev = pf->dev;

        vsi_ctx.pf_num = hw->pf_id;
        vsi_ctx.uplink_seid = pf->veb_seid;
        vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
        vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
        vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

        bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

        vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        vsi_ctx.info.switch_id = htole16(0);

        vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
        vsi_ctx.info.sec_flags = 0;
        if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
                vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

        vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
            I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

        vsi_ctx.info.valid_sections |=
            htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
        vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);

        /* ERJ: Only scattered allocation is supported for VFs right now */
        for (i = 0; i < vf->qtag.num_active; i++)
                vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
        for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
                vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

        vsi_ctx.info.tc_mapping[0] = htole16(
            (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
            ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
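
        /*
         * The TC queue count is encoded as a power of two: for example, with
         * num_allocated == 4, fls(4) - 1 == 2 is written, which the hardware
         * decodes as 2^2 = 4 queues on TC 0. This assumes the queue manager
         * hands out power-of-two allocations.
         */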

        code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
        if (code != I40E_SUCCESS)
                return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
        vf->vsi.seid = vsi_ctx.seid;
        vf->vsi.vsi_num = vsi_ctx.vsi_number;
        // TODO: How to deal with num tx queues / num rx queues split?
        // I don't think just assigning this variable is going to work
        vf->vsi.num_rx_queues = vf->qtag.num_active;
        vf->vsi.num_tx_queues = vf->qtag.num_active;

        code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
        if (code != I40E_SUCCESS)
                return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

        code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
        if (code != I40E_SUCCESS) {
                device_printf(dev, "Failed to disable BW limit: %d\n",
                    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
                return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
        }

        memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
        return (0);
}

static int
ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
        int error;

        error = ixl_vf_alloc_vsi(pf, vf);
        if (error != 0)
                return (error);

        vf->vsi.hw_filters_add = 0;
        vf->vsi.hw_filters_del = 0;
        // ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
        ixl_reconfigure_filters(&vf->vsi);

        return (error);
}

static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
    uint32_t val)
{
        uint32_t qtable;
        int index, shift;

        /*
         * Two queues are mapped in a single register, so we have to do some
         * gymnastics to convert the queue number into a register index and
         * shift.
         */
        index = qnum / 2;
        shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
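
        /*
         * For example, qnum 4 and qnum 5 share register index 2: qnum 4
         * occupies the low field (shift 0) and qnum 5 the high field
         * (shift == I40E_VSILAN_QTABLE_QINDEX_1_SHIFT).
         */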

        qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
        qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
        qtable |= val << shift;
        i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}

static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
        struct i40e_hw *hw;
        int i;
        uint32_t qtable;

        hw = &pf->hw;

        /*
         * Contiguous mappings aren't actually supported by the hardware,
         * so we have to use non-contiguous mappings.
         */
        i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
            I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

        /* Enable LAN traffic on this VF */
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
            I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

        /* Program index of each VF queue into PF queue space
         * (This is only needed if QTABLE is enabled) */
        for (i = 0; i < vf->vsi.num_tx_queues; i++) {
                qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
                    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
        }
        for (; i < IXL_MAX_VSI_QUEUES; i++)
                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
                    I40E_VPLAN_QTABLE_QINDEX_MASK);

        /* Map queues allocated to VF to its VSI;
         * This mapping matches the VF-wide mapping since the VF
         * is only given a single VSI */
        for (i = 0; i < vf->vsi.num_tx_queues; i++)
                ixl_vf_map_vsi_queue(hw, vf, i,
                    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));

        /* Set rest of VSI queues as unused. */
        for (; i < IXL_MAX_VSI_QUEUES; i++)
                ixl_vf_map_vsi_queue(hw, vf, i,
                    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

        ixl_flush(hw);
}

static void
ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
{
        struct i40e_hw *hw;

        hw = &pf->hw;

        if (vsi->seid == 0)
                return;

        i40e_aq_delete_element(hw, vsi->seid, NULL);
}

static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

        wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
        ixl_flush(hw);
}

static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

        wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
            I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
        ixl_flush(hw);
}

static void
ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
{
        struct i40e_hw *hw;
        uint32_t vfint_reg, vpint_reg;
        int i;

        hw = &pf->hw;

        ixl_vf_vsi_release(pf, &vf->vsi);

        /* Index 0 has a special register. */
        ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));

        for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
                vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
                ixl_vf_disable_queue_intr(hw, vfint_reg);
        }

        /* Index 0 has a special register. */
        ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));

        for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
                vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
                ixl_vf_unregister_intr(hw, vpint_reg);
        }

        vf->vsi.num_tx_queues = 0;
        vf->vsi.num_rx_queues = 0;
}

static int
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
{
        struct i40e_hw *hw;
        int i;
        uint16_t global_vf_num;
        uint32_t ciad;

        hw = &pf->hw;
        global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;

        wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
            (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
        for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
                ciad = rd32(hw, I40E_PF_PCI_CIAD);
                if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
                        return (0);
                DELAY(1);
        }

        return (ETIMEDOUT);
}

static void
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
        struct i40e_hw *hw;
        uint32_t vfrtrig;

        hw = &pf->hw;

        vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
        vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
        ixl_flush(hw);

        ixl_reinit_vf(pf, vf);
}

static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
        struct i40e_hw *hw;
        uint32_t vfrstat, vfrtrig;
        int i, error;

        hw = &pf->hw;

        error = ixl_flush_pcie(pf, vf);
        if (error != 0)
                device_printf(pf->dev,
                    "Timed out waiting for PCIe activity to stop on VF-%d\n",
                    vf->vf_num);

        for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
                DELAY(10);

                vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
                if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
                        break;
        }

        if (i == IXL_VF_RESET_TIMEOUT)
                device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);

        vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
        vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

        if (vf->vsi.seid != 0)
                ixl_disable_rings(&vf->vsi);

        ixl_vf_release_resources(pf, vf);
        ixl_vf_setup_vsi(pf, vf);
        ixl_vf_map_queues(pf, vf);

        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
        ixl_flush(hw);
}

static int
ixl_vc_opcode_level(uint16_t opcode)
{
        switch (opcode) {
        case VIRTCHNL_OP_GET_STATS:
                return (10);
        default:
                return (5);
        }
}

static void
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, void *msg, uint16_t len)
{
        struct i40e_hw *hw;
        int global_vf_id;

        hw = &pf->hw;
        global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;

        I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
            "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
            ixl_vc_opcode_str(op), op, status, vf->vf_num);

        i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
}

static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

        ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}

static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

        I40E_VC_DEBUG(pf, 1,
            "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
            ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
            status, vf->vf_num, file, line);
        ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}

static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_version_info reply;

        if (msg_size != sizeof(struct virtchnl_version_info)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
                    I40E_ERR_PARAM);
                return;
        }

        vf->version = ((struct virtchnl_version_info *)msg)->minor;

        reply.major = VIRTCHNL_VERSION_MAJOR;
        reply.minor = VIRTCHNL_VERSION_MINOR;
        ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
            sizeof(reply));
}

static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{

        if (msg_size != 0) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
                    I40E_ERR_PARAM);
                return;
        }

        ixl_reset_vf(pf, vf);

        /* No response to a reset message. */
}

static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_vf_resource reply;

        if ((vf->version == 0 && msg_size != 0) ||
            (vf->version == 1 && msg_size != 4)) {
                device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
                    " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
                    vf->version);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
                    I40E_ERR_PARAM);
                return;
        }

        bzero(&reply, sizeof(reply));

        if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
                reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
                    VIRTCHNL_VF_OFFLOAD_RSS_REG |
                    VIRTCHNL_VF_OFFLOAD_VLAN;
        else
                /* Force VF RSS setup by PF in 1.1+ VFs */
                reply.vf_cap_flags = *(u32 *)msg & (
                    VIRTCHNL_VF_OFFLOAD_L2 |
                    VIRTCHNL_VF_OFFLOAD_RSS_PF |
                    VIRTCHNL_VF_OFFLOAD_VLAN);

        reply.num_vsis = 1;
        reply.num_queue_pairs = vf->vsi.num_tx_queues;
        reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
        reply.rss_key_size = 52;
        reply.rss_lut_size = 64;
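        /*
         * These sizes match what the PF programs on the VF's behalf: a
         * 52-byte hash key and a 64-entry lookup table (see
         * ixl_vf_config_rss_key_msg() and ixl_vf_config_rss_lut_msg()).
         */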
        reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
        reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
        reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
        memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

        ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
            I40E_SUCCESS, &reply, sizeof(reply));
}

static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_txq_info *info)
{
        struct i40e_hw *hw;
        struct i40e_hmc_obj_txq txq;
        uint16_t global_queue_num, global_vf_num;
        enum i40e_status_code status;
        uint32_t qtx_ctl;

        hw = &pf->hw;
        global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
        global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
        bzero(&txq, sizeof(txq));

        DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
            vf->vf_num, global_queue_num, info->queue_id, global_vf_num);

        status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
        if (status != I40E_SUCCESS)
                return (EINVAL);

        txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

        txq.head_wb_ena = info->headwb_enabled;
        txq.head_wb_addr = info->dma_headwb_addr;
        txq.qlen = info->ring_len;
        txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
        txq.rdylist_act = 0;

        status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
        if (status != I40E_SUCCESS)
                return (EINVAL);
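
        /*
         * QTX_CTL marks this PF-space queue as a VF queue and records the
         * owning PF and global VF number, so the hardware can associate the
         * ring's DMA with the correct function.
         */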
        qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
            (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
            (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
        wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
        ixl_flush(hw);

        ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);

        return (0);
}

static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_rxq_info *info)
{
        struct i40e_hw *hw;
        struct i40e_hmc_obj_rxq rxq;
        uint16_t global_queue_num;
        enum i40e_status_code status;

        hw = &pf->hw;
        global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
        bzero(&rxq, sizeof(rxq));

        DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
            vf->vf_num, global_queue_num, info->queue_id);

        if (info->databuffer_size > IXL_VF_MAX_BUFFER)
                return (EINVAL);

        if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
            info->max_pkt_size < ETHER_MIN_LEN)
                return (EINVAL);

        if (info->splithdr_enabled) {
                if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
                        return (EINVAL);

                rxq.hsplit_0 = info->rx_split_pos &
                    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
                    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
                    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
                    I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
                rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

                rxq.dtype = 2;
        }

        status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
        if (status != I40E_SUCCESS)
                return (EINVAL);

        rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
        rxq.qlen = info->ring_len;

        rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

        rxq.dsize = 1;
        rxq.crcstrip = 1;
        rxq.l2tsel = 1;

        rxq.rxmax = info->max_pkt_size;
        rxq.tphrdesc_ena = 1;
        rxq.tphwdesc_ena = 1;
        rxq.tphdata_ena = 1;
        rxq.tphhead_ena = 1;
        rxq.lrxqthresh = 2;
        rxq.prefena = 1;

        status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
        if (status != I40E_SUCCESS)
                return (EINVAL);

        ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);

        return (0);
}

static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_vsi_queue_config_info *info;
        struct virtchnl_queue_pair_info *pair;
        uint16_t expected_msg_size;
        int i;

        if (msg_size < sizeof(*info)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        info = msg;
        if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
                device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
                    vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
        if (msg_size != expected_msg_size) {
                device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
                    vf->vf_num, msg_size, expected_msg_size);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        if (info->vsi_id != vf->vsi.vsi_num) {
                device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
                    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < info->num_queue_pairs; i++) {
                pair = &info->qpair[i];

                if (pair->txq.vsi_id != vf->vsi.vsi_num ||
                    pair->rxq.vsi_id != vf->vsi.vsi_num ||
                    pair->txq.queue_id != pair->rxq.queue_id ||
                    pair->txq.queue_id >= vf->vsi.num_tx_queues) {
                        i40e_send_vf_nack(pf, vf,
                            VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
                        return;
                }

                if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
                        i40e_send_vf_nack(pf, vf,
                            VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
                        return;
                }

                if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
                        i40e_send_vf_nack(pf, vf,
                            VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
                        return;
                }
        }

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}

static void
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
{
        uint32_t offset, qctl;
        uint16_t itr_indx;

        if (cur_type == I40E_QUEUE_TYPE_RX) {
                offset = I40E_QINT_RQCTL(cur_queue);
                itr_indx = vector->rxitr_idx;
        } else {
                offset = I40E_QINT_TQCTL(cur_queue);
                itr_indx = vector->txitr_idx;
        }

        qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
            (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
            (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
            I40E_QINT_RQCTL_CAUSE_ENA_MASK |
            (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

        wr32(&pf->hw, offset, qctl);

        *last_type = cur_type;
        *last_queue = cur_queue;
}

static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct virtchnl_vector_map *vector)
{
        struct i40e_hw *hw;
        int qindex;
        enum i40e_queue_type type, last_type;
        uint32_t lnklst_reg;
        uint16_t rxq_map, txq_map, cur_queue, last_queue;

        hw = &pf->hw;

        rxq_map = vector->rxq_map;
        txq_map = vector->txq_map;

        last_queue = IXL_END_OF_INTR_LNKLST;
        last_type = I40E_QUEUE_TYPE_RX;

        /*
         * The datasheet says to optimize performance, RX queues and TX queues
         * should be interleaved in the interrupt linked list, so we process
         * both at once here.
         */
        while ((rxq_map != 0) || (txq_map != 0)) {
                if (txq_map != 0) {
                        qindex = ffs(txq_map) - 1;
                        type = I40E_QUEUE_TYPE_TX;
                        cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
                        ixl_vf_set_qctl(pf, vector, type, cur_queue,
                            &last_type, &last_queue);
                        txq_map &= ~(1 << qindex);
                }

                if (rxq_map != 0) {
                        qindex = ffs(rxq_map) - 1;
                        type = I40E_QUEUE_TYPE_RX;
                        cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
                        ixl_vf_set_qctl(pf, vector, type, cur_queue,
                            &last_type, &last_queue);
                        rxq_map &= ~(1 << qindex);
                }
        }
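
        /*
         * Each ixl_vf_set_qctl() call links the new queue in front of the
         * previous one, so FIRSTQ written below names the queue processed
         * last and the chain terminates at IXL_END_OF_INTR_LNKLST. For
         * example, with txq_map == rxq_map == 0x3, queues are processed
         * TX0, RX0, TX1, RX1 and the hardware walks RX1 -> TX1 -> RX0 -> TX0.
         */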
        if (vector->vector_id == 0)
                lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
        else
                lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
                    vf->vf_num);
        wr32(hw, lnklst_reg,
            (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
            (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

        ixl_flush(hw);
}

static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_irq_map_info *map;
        struct virtchnl_vector_map *vector;
        struct i40e_hw *hw;
        int i, largest_txq, largest_rxq;

        hw = &pf->hw;

        if (msg_size < sizeof(*map)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
                    I40E_ERR_PARAM);
                return;
        }

        map = msg;
        if (map->num_vectors == 0) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
                    I40E_ERR_PARAM);
                return;
        }

        if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < map->num_vectors; i++) {
                vector = &map->vecmap[i];

                if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
                    vector->vsi_id != vf->vsi.vsi_num) {
                        i40e_send_vf_nack(pf, vf,
                            VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
                        return;
                }

                if (vector->rxq_map != 0) {
                        largest_rxq = fls(vector->rxq_map) - 1;
                        if (largest_rxq >= vf->vsi.num_rx_queues) {
                                i40e_send_vf_nack(pf, vf,
                                    VIRTCHNL_OP_CONFIG_IRQ_MAP,
                                    I40E_ERR_PARAM);
                                return;
                        }
                }

                if (vector->txq_map != 0) {
                        largest_txq = fls(vector->txq_map) - 1;
                        if (largest_txq >= vf->vsi.num_tx_queues) {
                                i40e_send_vf_nack(pf, vf,
                                    VIRTCHNL_OP_CONFIG_IRQ_MAP,
                                    I40E_ERR_PARAM);
                                return;
                        }
                }

                if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
                    vector->txitr_idx > IXL_MAX_ITR_IDX) {
                        i40e_send_vf_nack(pf, vf,
                            VIRTCHNL_OP_CONFIG_IRQ_MAP,
                            I40E_ERR_PARAM);
                        return;
                }

                ixl_vf_config_vector(pf, vf, vector);
        }

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
}

static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_queue_select *select;
        int error = 0;

        if (msg_size != sizeof(*select)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        select = msg;
        if (select->vsi_id != vf->vsi.vsi_num ||
            select->rx_queues == 0 || select->tx_queues == 0) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        /* Enable TX rings selected by the VF */
        for (int i = 0; i < 32; i++) {
                if ((1 << i) & select->tx_queues) {
                        /* Warn if queue is out of VF allocation range */
                        if (i >= vf->vsi.num_tx_queues) {
                                device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
                                    vf->vf_num, i);
                                break;
                        }
                        /* Skip this queue if it hasn't been configured */
                        if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
                                continue;
                        /* Warn if this queue is already marked as enabled */
                        if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
                                device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
                                    vf->vf_num, i);

                        error = ixl_enable_tx_ring(pf, &vf->qtag, i);
                        if (error)
                                break;
                        else
                                ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
                }
        }

        /* Enable RX rings selected by the VF */
        for (int i = 0; i < 32; i++) {
                if ((1 << i) & select->rx_queues) {
                        /* Warn if queue is out of VF allocation range */
                        if (i >= vf->vsi.num_rx_queues) {
                                device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
                                    vf->vf_num, i);
                                break;
                        }
                        /* Skip this queue if it hasn't been configured */
                        if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
                                continue;
                        /* Warn if this queue is already marked as enabled */
                        if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
                                device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
                                    vf->vf_num, i);
                        error = ixl_enable_rx_ring(pf, &vf->qtag, i);
                        if (error)
                                break;
                        else
                                ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
                }
        }

        if (error) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
                    I40E_ERR_TIMEOUT);
                return;
        }

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
}

static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
        struct virtchnl_queue_select *select;
        int error = 0;

        if (msg_size != sizeof(*select)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        select = msg;
        if (select->vsi_id != vf->vsi.vsi_num ||
            select->rx_queues == 0 || select->tx_queues == 0) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
                    I40E_ERR_PARAM);
                return;
        }

        /* Disable TX rings selected by the VF */
        for (int i = 0; i < 32; i++) {
                if ((1 << i) & select->tx_queues) {
                        /* Warn if queue is out of VF allocation range */
                        if (i >= vf->vsi.num_tx_queues) {
                                device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
                                    vf->vf_num, i);
                                break;
                        }
                        /* Skip this queue if it hasn't been configured */
                        if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
                                continue;
                        /* Warn if this queue is already marked as disabled */
                        if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
                                device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
                                    vf->vf_num, i);
                                continue;
                        }
                        error = ixl_disable_tx_ring(pf, &vf->qtag, i);
                        if (error)
                                break;
                        else
                                ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
                }
        }

        /* Disable RX rings selected by the VF */
        for (int i = 0; i < 32; i++) {
                if ((1 << i) & select->rx_queues) {
                        /* Warn if queue is out of VF allocation range */
                        if (i >= vf->vsi.num_rx_queues) {
                                device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
                                    vf->vf_num, i);
                                break;
                        }
                        /* Skip this queue if it hasn't been configured */
                        if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
                                continue;
                        /* Warn if this queue is already marked as disabled */
                        if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
                                device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
                                    vf->vf_num, i);
                                continue;
                        }
                        error = ixl_disable_rx_ring(pf, &vf->qtag, i);
                        if (error)
                                break;
                        else
                                ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
                }
        }

        if (error) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
                    I40E_ERR_TIMEOUT);
                return;
        }

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}

static bool
ixl_zero_mac(const uint8_t *addr)
{
        uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

        return (cmp_etheraddr(addr, zero));
}

static bool
ixl_bcast_mac(const uint8_t *addr)
{
        static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
            {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

        return (cmp_etheraddr(addr, ixl_bcast_addr));
}

static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{

        if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
                return (EINVAL);

        /*
         * If the VF is not allowed to change its MAC address, don't let it
         * set a MAC filter for an address that is not a multicast address and
         * is not its assigned MAC.
         */
        if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
            !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
                return (EPERM);

        return (0);
}

static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_ether_addr_list *addr_list;
        struct virtchnl_ether_addr *addr;
        struct ixl_vsi *vsi;
        int i;
        size_t expected_size;

        vsi = &vf->vsi;

        if (msg_size < sizeof(*addr_list)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
                    I40E_ERR_PARAM);
                return;
        }

        addr_list = msg;
        expected_size = sizeof(*addr_list) +
            addr_list->num_elements * sizeof(*addr);

        if (addr_list->num_elements == 0 ||
            addr_list->vsi_id != vsi->vsi_num ||
            msg_size != expected_size) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < addr_list->num_elements; i++) {
                if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
                        i40e_send_vf_nack(pf, vf,
                            VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
                        return;
                }
        }

        for (i = 0; i < addr_list->num_elements; i++) {
                addr = &addr_list->list[i];
                ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
        }

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
}

static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_ether_addr_list *addr_list;
        struct virtchnl_ether_addr *addr;
        size_t expected_size;
        int i;

        if (msg_size < sizeof(*addr_list)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
                    I40E_ERR_PARAM);
                return;
        }

        addr_list = msg;
        expected_size = sizeof(*addr_list) +
            addr_list->num_elements * sizeof(*addr);

        if (addr_list->num_elements == 0 ||
            addr_list->vsi_id != vf->vsi.vsi_num ||
            msg_size != expected_size) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < addr_list->num_elements; i++) {
                addr = &addr_list->list[i];
                if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
                        i40e_send_vf_nack(pf, vf,
                            VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
                        return;
                }
        }

        for (i = 0; i < addr_list->num_elements; i++) {
                addr = &addr_list->list[i];
                ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
        }

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
}

static enum i40e_status_code
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
{
        struct i40e_vsi_context vsi_ctx;

        vsi_ctx.seid = vf->vsi.seid;

        bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
        vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
            I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
        return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
}

static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_vlan_filter_list *filter_list;
        enum i40e_status_code code;
        size_t expected_size;
        int i;

        if (msg_size < sizeof(*filter_list)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
                    I40E_ERR_PARAM);
                return;
        }

        filter_list = msg;
        expected_size = sizeof(*filter_list) +
            filter_list->num_elements * sizeof(uint16_t);
        if (filter_list->num_elements == 0 ||
            filter_list->vsi_id != vf->vsi.vsi_num ||
            msg_size != expected_size) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
                    I40E_ERR_PARAM);
                return;
        }

        if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < filter_list->num_elements; i++) {
                if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
                        i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
                            I40E_ERR_PARAM);
                        return;
                }
        }

        code = ixl_vf_enable_vlan_strip(pf, vf);
        if (code != I40E_SUCCESS) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < filter_list->num_elements; i++)
                ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
}

static void
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_vlan_filter_list *filter_list;
        size_t expected_size;
        int i;

        if (msg_size < sizeof(*filter_list)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
                    I40E_ERR_PARAM);
                return;
        }

        filter_list = msg;
        expected_size = sizeof(*filter_list) +
            filter_list->num_elements * sizeof(uint16_t);
        if (filter_list->num_elements == 0 ||
            filter_list->vsi_id != vf->vsi.vsi_num ||
            msg_size != expected_size) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < filter_list->num_elements; i++) {
                if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
                        i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
                            I40E_ERR_PARAM);
                        return;
                }
        }

        if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
                    I40E_ERR_PARAM);
                return;
        }

        for (i = 0; i < filter_list->num_elements; i++)
                ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
}

static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
        struct virtchnl_promisc_info *info;
        enum i40e_status_code code;

        if (msg_size != sizeof(*info)) {
                i40e_send_vf_nack(pf, vf,
                    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
                return;
        }

        if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
                i40e_send_vf_nack(pf, vf,
                    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
                return;
        }

        info = msg;
        if (info->vsi_id != vf->vsi.vsi_num) {
                i40e_send_vf_nack(pf, vf,
                    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
                return;
        }
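
        /*
         * The trailing TRUE is the rx_only_promisc argument of
         * i40e_aq_set_vsi_unicast_promiscuous() in the shared i40e code:
         * unicast promiscuous mode is requested for receive only.
         */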
        code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
            info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
        if (code != I40E_SUCCESS) {
                i40e_send_vf_nack(pf, vf,
                    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
                return;
        }

        code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
            info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
        if (code != I40E_SUCCESS) {
                i40e_send_vf_nack(pf, vf,
                    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
                return;
        }

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}

static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct virtchnl_queue_select *queue;

        if (msg_size != sizeof(*queue)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
                    I40E_ERR_PARAM);
                return;
        }

        queue = msg;
        if (queue->vsi_id != vf->vsi.vsi_num) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
                    I40E_ERR_PARAM);
                return;
        }

        ixl_update_eth_stats(&vf->vsi);

        ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
            I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}

static void
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct i40e_hw *hw;
        struct virtchnl_rss_key *key;
        struct i40e_aqc_get_set_rss_key_data key_data;
        enum i40e_status_code status;

        hw = &pf->hw;

        if (msg_size < sizeof(*key)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
                    I40E_ERR_PARAM);
                return;
        }

        key = msg;

        if (key->key_len > 52) {
                device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
                    vf->vf_num, key->key_len, 52);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
                    I40E_ERR_PARAM);
                return;
        }

        if (key->vsi_id != vf->vsi.vsi_num) {
                device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
                    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
                    I40E_ERR_PARAM);
                return;
        }

        /* Fill out hash using MAC-dependent method */
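        /*
         * X722 MACs take the key via the admin queue, split into a 40-byte
         * standard RSS key plus up to 12 bytes of extended hash key; other
         * MACs have the key written directly into the per-VF
         * I40E_VFQF_HKEY1 registers, 32 bits at a time.
         */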
        if (hw->mac.type == I40E_MAC_X722) {
                bzero(&key_data, sizeof(key_data));
                if (key->key_len <= 40)
                        bcopy(key->key, key_data.standard_rss_key, key->key_len);
                else {
                        bcopy(key->key, key_data.standard_rss_key, 40);
                        bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
                }
                status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
                if (status) {
                        device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
                            i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
                        i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
                            I40E_ERR_ADMIN_QUEUE_ERROR);
                        return;
                }
        } else {
                for (int i = 0; i < (key->key_len / 4); i++)
                        i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
        }

        DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
            vf->vf_num, key->key[0]);

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}

static void
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct i40e_hw *hw;
        struct virtchnl_rss_lut *lut;
        enum i40e_status_code status;

        hw = &pf->hw;

        if (msg_size < sizeof(*lut)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
                    I40E_ERR_PARAM);
                return;
        }

        lut = msg;

        if (lut->lut_entries > 64) {
                device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
                    vf->vf_num, lut->lut_entries, 64);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
                    I40E_ERR_PARAM);
                return;
        }

        if (lut->vsi_id != vf->vsi.vsi_num) {
                device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
                    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
                    I40E_ERR_PARAM);
                return;
        }

        /* Fill out LUT using MAC-dependent method */
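        /*
         * Each LUT entry is one byte selecting which VF queue receives
         * traffic for that hash bucket; the register path below packs four
         * entries into each 32-bit I40E_VFQF_HLUT1 register.
         */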
        if (hw->mac.type == I40E_MAC_X722) {
                status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
                if (status) {
                        device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
                            i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
                        i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
                            I40E_ERR_ADMIN_QUEUE_ERROR);
                        return;
                }
        } else {
                for (int i = 0; i < (lut->lut_entries / 4); i++)
                        i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
        }

        DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
            vf->vf_num, lut->lut[0], lut->lut_entries);

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}

static void
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
        struct i40e_hw *hw;
        struct virtchnl_rss_hena *hena;

        hw = &pf->hw;

        if (msg_size < sizeof(*hena)) {
                i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
                    I40E_ERR_PARAM);
                return;
        }

        hena = msg;

        /* Set HENA */
        i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
        i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
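
        /*
         * HENA is a 64-bit mask of enabled hash/packet-classifier types, so
         * it spans two 32-bit registers: index 0 holds the low word and
         * index 1 the high word.
         */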
        DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
            vf->vf_num, hena->hena);

        ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
}

void
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
{
        struct virtchnl_pf_event event;
        struct i40e_hw *hw;

        hw = &pf->hw;
        event.event = VIRTCHNL_EVENT_LINK_CHANGE;
        event.severity = PF_EVENT_SEVERITY_INFO;
        event.event_data.link_event.link_status = pf->vsi.link_active;
        event.event_data.link_event.link_speed =
            (enum virtchnl_link_speed)hw->phy.link_info.link_speed;

        ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
            sizeof(event));
}

void
ixl_broadcast_link_state(struct ixl_pf *pf)
{
        int i;

        for (i = 0; i < pf->num_vfs; i++)
                ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}

void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
        struct ixl_vf *vf;
        void *msg;
        uint16_t vf_num, msg_size;
        uint32_t opcode;

        vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
        opcode = le32toh(event->desc.cookie_high);
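
        /*
         * For mailbox events the firmware reports the global VF id in the
         * descriptor's retval field and the virtchnl opcode in cookie_high,
         * so both are decoded from the descriptor rather than the message
         * body.
         */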
        if (vf_num >= pf->num_vfs) {
                device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
                return;
        }

        vf = &pf->vfs[vf_num];
        msg = event->msg_buf;
        msg_size = event->msg_len;

        I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
            "Got msg %s(%d) from%sVF-%d of size %d\n",
            ixl_vc_opcode_str(opcode), opcode,
            (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
            vf_num, msg_size);

        /* This must be a stray msg from a previously destroyed VF. */
        if (!(vf->vf_flags & VF_FLAG_ENABLED))
                return;

        switch (opcode) {
        case VIRTCHNL_OP_VERSION:
                ixl_vf_version_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_RESET_VF:
                ixl_vf_reset_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_GET_VF_RESOURCES:
                ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
                /* Notify VF of link state after it obtains queues, as this is
                 * the last thing it will do as part of initialization
                 */
                ixl_notify_vf_link_state(pf, vf);
                break;
        case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
                ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_CONFIG_IRQ_MAP:
                ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_ENABLE_QUEUES:
                ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
                /* Notify VF of link state after it obtains queues, as this is
                 * the last thing it will do as part of initialization
                 */
                ixl_notify_vf_link_state(pf, vf);
                break;
        case VIRTCHNL_OP_DISABLE_QUEUES:
                ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_ADD_ETH_ADDR:
                ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_DEL_ETH_ADDR:
                ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_ADD_VLAN:
                ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_DEL_VLAN:
                ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
                ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_GET_STATS:
                ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_CONFIG_RSS_KEY:
                ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_CONFIG_RSS_LUT:
                ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
                break;
        case VIRTCHNL_OP_SET_RSS_HENA:
                ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
                break;

        /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
        case VIRTCHNL_OP_CONFIG_TX_QUEUE:
        case VIRTCHNL_OP_CONFIG_RX_QUEUE:
        default:
                i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
                break;
        }
}

/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
void
ixl_handle_vflr(void *arg, int pending)
{
        struct ixl_pf *pf;
        struct ixl_vf *vf;
        struct i40e_hw *hw;
        uint16_t global_vf_num;
        uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
        int i;

        pf = arg;
        hw = &pf->hw;

        /* TODO: May need to lock this */
        for (i = 0; i < pf->num_vfs; i++) {
                global_vf_num = hw->func_caps.vf_base_id + i;

                vf = &pf->vfs[i];
                if (!(vf->vf_flags & VF_FLAG_ENABLED))
                        continue;

                vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
                vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
                vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
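                /*
                 * GLGEN_VFLRSTAT is an array of 32-bit registers holding one
                 * bit per global VF; the INDEX/MASK macros pick this VF's
                 * register and bit, and writing the bit back below clears it
                 * before the VF is reinitialized.
                 */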
                if (vflrstat & vflrstat_mask) {
                        wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
                            vflrstat_mask);

                        ixl_reinit_vf(pf, vf);
                }
        }

        atomic_clear_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
        icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
        icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
        ixl_flush(hw);
}

static int
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
{
        switch (err) {
        case I40E_AQ_RC_EPERM:
                return (EPERM);
        case I40E_AQ_RC_ENOENT:
                return (ENOENT);
        case I40E_AQ_RC_ESRCH:
                return (ESRCH);
        case I40E_AQ_RC_EINTR:
                return (EINTR);
        case I40E_AQ_RC_EIO:
                return (EIO);
        case I40E_AQ_RC_ENXIO:
                return (ENXIO);
        case I40E_AQ_RC_E2BIG:
                return (E2BIG);
        case I40E_AQ_RC_EAGAIN:
                return (EAGAIN);
        case I40E_AQ_RC_ENOMEM:
                return (ENOMEM);
        case I40E_AQ_RC_EACCES:
                return (EACCES);
        case I40E_AQ_RC_EFAULT:
                return (EFAULT);
        case I40E_AQ_RC_EBUSY:
                return (EBUSY);
        case I40E_AQ_RC_EEXIST:
                return (EEXIST);
        case I40E_AQ_RC_EINVAL:
                return (EINVAL);
        case I40E_AQ_RC_ENOTTY:
                return (ENOTTY);
        case I40E_AQ_RC_ENOSPC:
                return (ENOSPC);
        case I40E_AQ_RC_ENOSYS:
                return (ENOSYS);
        case I40E_AQ_RC_ERANGE:
                return (ERANGE);
        case I40E_AQ_RC_EFLUSHED:
                return (EINVAL);        /* No exact equivalent in errno.h */
        case I40E_AQ_RC_BAD_ADDR:
                return (EFAULT);
        case I40E_AQ_RC_EMODE:
                return (EPERM);
        case I40E_AQ_RC_EFBIG:
                return (EFBIG);
        default:
                return (EINVAL);
        }
}

int
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
        struct ixl_pf *pf;
        struct i40e_hw *hw;
        struct ixl_vsi *pf_vsi;
        enum i40e_status_code ret;
        int i, error;

        pf = device_get_softc(dev);
        hw = &pf->hw;
        pf_vsi = &pf->vsi;

        pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
            M_ZERO);
        if (pf->vfs == NULL) {
                error = ENOMEM;
                goto fail;
        }

        for (i = 0; i < num_vfs; i++)
                sysctl_ctx_init(&pf->vfs[i].ctx);

        ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
            1, FALSE, &pf->veb_seid, FALSE, NULL);
        if (ret != I40E_SUCCESS) {
                error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
                device_printf(dev, "add_veb failed; code=%d error=%d", ret,
                    error);
                goto fail;
        }

        pf->num_vfs = num_vfs;
        //IXL_PF_UNLOCK(pf);
        return (0);

fail:
        free(pf->vfs, M_IXL);
        pf->vfs = NULL;
        //IXL_PF_UNLOCK(pf);
        return (error);
}

void
ixl_iov_uninit(device_t dev)
{
        struct ixl_pf *pf;
        struct i40e_hw *hw;
        struct ixl_vsi *vsi;
        struct ifnet *ifp;
        struct ixl_vf *vfs;
        int i, num_vfs;

        pf = device_get_softc(dev);
        hw = &pf->hw;
        vsi = &pf->vsi;
        ifp = vsi->ifp;

        for (i = 0; i < pf->num_vfs; i++) {
                if (pf->vfs[i].vsi.seid != 0)
                        i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
                ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
                ixl_free_mac_filters(&pf->vfs[i].vsi);
                DDPRINTF(dev, "VF %d: %d released\n",
                    i, pf->vfs[i].qtag.num_allocated);
                DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
        }

        if (pf->veb_seid != 0) {
                i40e_aq_delete_element(hw, pf->veb_seid, NULL);
                pf->veb_seid = 0;
        }

        vfs = pf->vfs;
        num_vfs = pf->num_vfs;

        pf->vfs = NULL;
        pf->num_vfs = 0;
        //IXL_PF_UNLOCK(pf);

        /* Do this after the unlock as sysctl_ctx_free might sleep. */
        for (i = 0; i < num_vfs; i++)
                sysctl_ctx_free(&vfs[i].ctx);

        free(vfs, M_IXL);
}

static int
ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
{
        device_t dev = pf->dev;
        int error;

        /* Validate, and clamp value if invalid */
        if (num_queues < 1 || num_queues > 16)
                device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
                    num_queues, vf->vf_num);
        if (num_queues < 1) {
                device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
                num_queues = 1;
        } else if (num_queues > 16) {
                device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
                num_queues = 16;
        }
        error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
        if (error) {
                device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
                    num_queues, vf->vf_num);
                return (ENOSPC);
        }

        DDPRINTF(dev, "VF %d: %d allocated, %d active",
            vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
        DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));

        return (0);
}

int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
        char sysctl_name[QUEUE_NAME_LEN];
        struct ixl_pf *pf;
        struct ixl_vf *vf;
        const void *mac;
        size_t size;
        int error;
        int vf_num_queues;

        pf = device_get_softc(dev);
        vf = &pf->vfs[vfnum];

        vf->vf_num = vfnum;
        vf->vsi.back = pf;
        vf->vf_flags = VF_FLAG_ENABLED;
        SLIST_INIT(&vf->vsi.ftl);

        /* Reserve queue allocation from PF */
        vf_num_queues = nvlist_get_number(params, "num-queues");
        error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
        if (error != 0)
                goto out;

        error = ixl_vf_setup_vsi(pf, vf);
        if (error != 0)
                goto out;

        if (nvlist_exists_binary(params, "mac-addr")) {
                mac = nvlist_get_binary(params, "mac-addr", &size);
                bcopy(mac, vf->mac, ETHER_ADDR_LEN);

                if (nvlist_get_bool(params, "allow-set-mac"))
                        vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
        } else
                /*
                 * If the administrator has not specified a MAC address then
                 * we must allow the VF to choose one.
                 */
                vf->vf_flags |= VF_FLAG_SET_MAC_CAP;

        if (nvlist_get_bool(params, "mac-anti-spoof"))
                vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

        if (nvlist_get_bool(params, "allow-promisc"))
                vf->vf_flags |= VF_FLAG_PROMISC_CAP;

        vf->vf_flags |= VF_FLAG_VLAN_CAP;

        ixl_reset_vf(pf, vf);
out:
        //IXL_PF_UNLOCK(pf);
        if (error == 0) {
                snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
                ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
        }

        return (error);
}