1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixl_pf_iov.h"
37 /* Private functions */
/* Low-level per-VF queue / interrupt register programming helpers. */
38 static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39 static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40 static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
/* MAC address classification helpers. */
42 static bool ixl_zero_mac(const uint8_t *addr);
43 static bool ixl_bcast_mac(const uint8_t *addr);
/* Virtchnl opcode -> debug-string / debug-level helpers. */
45 static const char * ixl_vc_opcode_str(uint16_t op);
46 static int ixl_vc_opcode_level(uint16_t opcode);
48 static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
/* VF VSI lifecycle: allocate, set up, map queues, release, reset. */
50 static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51 static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
53 static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
54 static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
55 static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
56 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
/* PF -> VF admin-queue message transmission. */
58 static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
59 static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
60 static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
/* Handlers for individual virtchnl opcodes received from a VF. */
61 static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62 static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63 static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
64 static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
65 static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
66 static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
67 static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
68 enum i40e_queue_type *last_type, uint16_t *last_queue);
69 static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
70 static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72 static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
75 static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
76 static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77 static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78 static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79 static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
80 static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
/* Translate admin-queue error codes to errno values. */
82 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
85 ixl_initialize_sriov(struct ixl_pf *pf)
87 device_t dev = pf->dev;
88 struct i40e_hw *hw = &pf->hw;
89 nvlist_t *pf_schema, *vf_schema;
92 /* SR-IOV is only supported when MSI-X is in use. */
96 pf_schema = pci_iov_schema_alloc_node();
97 vf_schema = pci_iov_schema_alloc_node();
98 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
99 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
100 IOV_SCHEMA_HASDEFAULT, TRUE);
101 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
102 IOV_SCHEMA_HASDEFAULT, FALSE);
103 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
104 IOV_SCHEMA_HASDEFAULT, FALSE);
105 pci_iov_schema_add_uint16(vf_schema, "num-queues",
106 IOV_SCHEMA_HASDEFAULT,
107 max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);
109 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
110 if (iov_error != 0) {
112 "Failed to initialize SR-IOV (error=%d)\n",
115 device_printf(dev, "SR-IOV ready\n");
117 pf->vc_debug_lvl = 1;
121 * Allocate the VSI for a VF.
/*
 * Builds an i40e_vsi_context describing the VF's VSI (uplinked to the PF's
 * VEB, optional MAC anti-spoof, scattered queue mapping) and creates it via
 * the admin queue.  Returns 0 or an errno translated from the AQ status.
 */
124 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
129 struct i40e_vsi_context vsi_ctx;
131 enum i40e_status_code code;
/* Identify the VSI as a VF-type VSI owned by this PF/VF pair. */
137 vsi_ctx.pf_num = hw->pf_id;
138 vsi_ctx.uplink_seid = pf->veb_seid;
139 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
140 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
141 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
143 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
145 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
146 vsi_ctx.info.switch_id = htole16(0);
/* MAC anti-spoof check is opt-in per VF. */
148 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
149 vsi_ctx.info.sec_flags = 0;
150 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
151 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
153 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
154 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
155 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
157 vsi_ctx.info.valid_sections |=
158 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
159 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
161 /* ERJ: Only scattered allocation is supported for VFs right now */
162 for (i = 0; i < vf->qtag.num_active; i++)
163 vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
/* Mark the remaining queue-map slots as unused. */
164 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
165 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
/* TC 0 gets all queues; TC queue count field is log2(num_allocated). */
167 vsi_ctx.info.tc_mapping[0] = htole16(
168 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
169 (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
171 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
172 if (code != I40E_SUCCESS)
173 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
174 vf->vsi.seid = vsi_ctx.seid;
175 vf->vsi.vsi_num = vsi_ctx.vsi_number;
176 // vf->vsi.first_queue = vf->qtag.qidx[0];
177 vf->vsi.num_queues = vf->qtag.num_active;
179 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
180 if (code != I40E_SUCCESS)
181 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Disable the bandwidth limit on the new VSI (credit/max = 0). */
183 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
184 if (code != I40E_SUCCESS) {
185 device_printf(dev, "Failed to disable BW limit: %d\n",
186 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Cache the firmware's view of the VSI parameters. */
190 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/*
 * Allocate the VF's VSI and install its initial filters: reset the HW
 * filter counters, add a broadcast filter, and replay any existing ones.
 */
195 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
202 error = ixl_vf_alloc_vsi(pf, vf);
206 vf->vsi.hw_filters_add = 0;
207 vf->vsi.hw_filters_del = 0;
208 ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
209 ixl_reconfigure_filters(&vf->vsi);
/*
 * Program one entry of the VSI's queue table (VSILAN_QTABLE) with a
 * read-modify-write, since each 32-bit register holds two queue entries.
 */
215 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
222 * Two queues are mapped in a single register, so we have to do some
223 * gymnastics to convert the queue number into a register index and
/* Odd queue numbers land in the upper half of the register. */
227 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
229 qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
230 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
231 qtable |= val << shift;
232 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/*
 * Map the VF's queues into both the per-VF queue table (VPLAN_QTABLE)
 * and its VSI's queue table, then enable LAN traffic for the VF.
 * Unused table slots are written with the all-ones "unused" index.
 */
236 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
245 * Contiguous mappings aren't actually supported by the hardware,
246 * so we have to use non-contiguous mappings.
248 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
249 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
251 /* Enable LAN traffic on this VF */
252 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
253 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
255 /* Program index of each VF queue into PF queue space
256 * (This is only needed if QTABLE is enabled) */
257 for (i = 0; i < vf->vsi.num_queues; i++) {
258 qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
259 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
261 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
/* Remaining per-VF slots are marked unused. */
263 for (; i < IXL_MAX_VSI_QUEUES; i++)
264 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
265 I40E_VPLAN_QTABLE_QINDEX_MASK);
267 /* Map queues allocated to VF to its VSI;
268 * This mapping matches the VF-wide mapping since the VF
269 * is only given a single VSI */
270 for (i = 0; i < vf->vsi.num_queues; i++)
271 ixl_vf_map_vsi_queue(hw, vf, i,
272 ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
274 /* Set rest of VSI queues as unused. */
275 for (; i < IXL_MAX_VSI_QUEUES; i++)
276 ixl_vf_map_vsi_queue(hw, vf, i,
277 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/* Delete the VF's VSI switch element via the admin queue. */
283 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
292 i40e_aq_delete_element(hw, vsi->seid, NULL);
/* Disable a VF queue interrupt (write clears enable, sets CLEARPBA). */
296 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
299 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/* Terminate an interrupt linked list by writing the "end of list" value. */
304 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
307 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
308 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/*
 * Tear down a VF: delete its VSI, disable every queue interrupt, and
 * unhook every interrupt linked list.  Vector/queue index 0 uses distinct
 * registers (DYN_CTL0 / LNKLST0) from indices >= 1 (DYN_CTLN / LNKLSTN).
 */
313 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
316 uint32_t vfint_reg, vpint_reg;
321 ixl_vf_vsi_release(pf, &vf->vsi);
323 /* Index 0 has a special register. */
324 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
326 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
327 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
328 ixl_vf_disable_queue_intr(hw, vfint_reg);
331 /* Index 0 has a special register. */
332 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
334 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
335 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
336 ixl_vf_unregister_intr(hw, vpint_reg);
/* The VF now owns no queues. */
339 vf->vsi.num_queues = 0;
/*
 * Poll the VF's PCIe device status (via the PF_PCI_CIAA/CIAD indirection
 * registers) until no transactions are pending, or the reset timeout
 * expires.  Used before resetting the VF.
 */
343 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
347 uint16_t global_vf_num;
351 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
/* Select the VF's device-status capability register. */
353 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
354 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
355 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
356 ciad = rd32(hw, I40E_PF_PCI_CIAD);
357 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/* Trigger a software reset of the VF (VFSWR), then re-initialize it. */
366 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
373 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
374 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
375 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
378 ixl_reinit_vf(pf, vf);
/*
 * Complete a VF reset: wait for PCIe quiesce and reset completion
 * (VFRD), clear the reset trigger, then rebuild the VF's VSI and queue
 * mappings and mark the VF active.
 */
382 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
385 uint32_t vfrstat, vfrtrig;
390 error = ixl_flush_pcie(pf, vf);
392 device_printf(pf->dev,
393 "Timed out waiting for PCIe activity to stop on VF-%d\n",
/* Poll for reset-done indication from hardware. */
396 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
399 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
400 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
404 if (i == IXL_VF_RESET_TIMEOUT)
405 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
/* Tell the VF its reset has completed. */
407 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
409 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
410 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
411 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
413 if (vf->vsi.seid != 0)
414 ixl_disable_rings(&vf->vsi);
/* Rebuild the VF's resources from scratch. */
416 ixl_vf_release_resources(pf, vf);
417 ixl_vf_setup_vsi(pf, vf);
418 ixl_vf_map_queues(pf, vf);
420 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
/* Return a human-readable name for a virtchnl opcode (debug logging). */
425 ixl_vc_opcode_str(uint16_t op)
429 case I40E_VIRTCHNL_OP_VERSION:
431 case I40E_VIRTCHNL_OP_RESET_VF:
433 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
434 return ("GET_VF_RESOURCES");
435 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
436 return ("CONFIG_TX_QUEUE");
437 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
438 return ("CONFIG_RX_QUEUE");
439 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
440 return ("CONFIG_VSI_QUEUES");
441 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
442 return ("CONFIG_IRQ_MAP");
443 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
444 return ("ENABLE_QUEUES");
445 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
446 return ("DISABLE_QUEUES");
447 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
448 return ("ADD_ETHER_ADDRESS");
449 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
450 return ("DEL_ETHER_ADDRESS");
451 case I40E_VIRTCHNL_OP_ADD_VLAN:
453 case I40E_VIRTCHNL_OP_DEL_VLAN:
455 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
456 return ("CONFIG_PROMISCUOUS_MODE");
457 case I40E_VIRTCHNL_OP_GET_STATS:
458 return ("GET_STATS");
459 case I40E_VIRTCHNL_OP_FCOE:
461 case I40E_VIRTCHNL_OP_EVENT:
463 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
464 return ("CONFIG_RSS_KEY");
465 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
466 return ("CONFIG_RSS_LUT");
467 case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
468 return ("GET_RSS_HENA_CAPS");
469 case I40E_VIRTCHNL_OP_SET_RSS_HENA:
470 return ("SET_RSS_HENA");
/*
 * Return the debug verbosity level required to log a given opcode;
 * high-frequency messages (GET_STATS) are demoted to a higher level.
 */
477 ixl_vc_opcode_level(uint16_t opcode)
480 case I40E_VIRTCHNL_OP_GET_STATS:
/*
 * Send a virtchnl reply (opcode + status + optional payload) to a VF
 * over the admin queue, using the VF's global (device-wide) VF id.
 */
488 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
489 enum i40e_status_code status, void *msg, uint16_t len)
495 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
497 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
498 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
499 ixl_vc_opcode_str(op), op, status, vf->vf_num);
501 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* Positive acknowledgement: success status, no payload. */
505 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
508 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/*
 * Negative acknowledgement with the failing status code; logs the
 * originating file/line (callers use the i40e_send_vf_nack macro).
 */
512 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
513 enum i40e_status_code status, const char *file, int line)
517 "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
518 ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
519 status, vf->vf_num, file, line);
520 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/*
 * Handle I40E_VIRTCHNL_OP_VERSION: record the VF's minor API version and
 * reply with the PF's supported virtchnl version.
 */
524 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
527 struct i40e_virtchnl_version_info reply;
529 if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
530 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
/* Only the minor version distinguishes VF behavior here. */
535 vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
537 reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
538 reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
539 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
/*
 * Handle I40E_VIRTCHNL_OP_RESET_VF: reset the VF.  Per the virtchnl
 * contract no reply is sent; the VF polls its reset status register.
 */
544 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
549 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
554 ixl_reset_vf(pf, vf);
556 /* No response to a reset message. */
/*
 * Handle I40E_VIRTCHNL_OP_GET_VF_RESOURCES: report the VF's offload
 * capabilities, queue/vector counts, and VSI description.  Message size
 * depends on the VF API version (v1.0 sends no payload; v1.1 sends a
 * 32-bit capability request).
 */
560 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
563 struct i40e_virtchnl_vf_resource reply;
565 if ((vf->version == 0 && msg_size != 0) ||
566 (vf->version == 1 && msg_size != 4)) {
567 device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
568 " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
570 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
575 bzero(&reply, sizeof(reply));
/* v1.0 VFs get a fixed legacy capability set. */
577 if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
578 reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
579 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
580 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
582 /* Force VF RSS setup by PF in 1.1+ VFs */
583 reply.vf_offload_flags = *(u32 *)msg & (
584 I40E_VIRTCHNL_VF_OFFLOAD_L2 |
585 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
586 I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
589 reply.num_queue_pairs = vf->vsi.num_queues;
590 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
/* NOTE(review): RSS key/LUT sizes appear hard-coded for this device family. */
591 reply.rss_key_size = 52;
592 reply.rss_lut_size = 64;
593 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
594 reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
595 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
596 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
598 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
599 I40E_SUCCESS, &reply, sizeof(reply));
/*
 * Program one VF TX queue's HMC context from the VF-supplied description,
 * translating the VF-relative queue id into the PF's global queue space.
 * Returns 0 on success, non-zero if the HMC context update fails.
 */
603 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
604 struct i40e_virtchnl_txq_info *info)
607 struct i40e_hmc_obj_txq txq;
608 uint16_t global_queue_num, global_vf_num;
609 enum i40e_status_code status;
613 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
614 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
615 bzero(&txq, sizeof(txq));
617 DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
618 vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
620 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
621 if (status != I40E_SUCCESS)
/* Ring base is expressed in 128-byte units in the HMC context. */
624 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
626 txq.head_wb_ena = info->headwb_enabled;
627 txq.head_wb_addr = info->dma_headwb_addr;
628 txq.qlen = info->ring_len;
629 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
632 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
633 if (status != I40E_SUCCESS)
/* Associate the global TX queue with this VF in QTX_CTL. */
636 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
637 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
638 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
639 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
642 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
/*
 * Program one VF RX queue's HMC context, validating the VF-supplied
 * buffer/packet sizes and optional header-split configuration first.
 * Returns 0 on success, non-zero on validation or HMC failure.
 */
648 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
649 struct i40e_virtchnl_rxq_info *info)
652 struct i40e_hmc_obj_rxq rxq;
653 uint16_t global_queue_num;
654 enum i40e_status_code status;
657 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
658 bzero(&rxq, sizeof(rxq));
660 DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
661 vf->vf_num, global_queue_num, info->queue_id);
/* Reject buffer and frame sizes outside the hardware limits. */
663 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
666 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
667 info->max_pkt_size < ETHER_MIN_LEN)
670 if (info->splithdr_enabled) {
671 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
/* Restrict header-split positions to the supported L2/IP/TCP-UDP/SCTP set. */
674 rxq.hsplit_0 = info->rx_split_pos &
675 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
676 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
677 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
678 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
679 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
684 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
685 if (status != I40E_SUCCESS)
/* Ring base in 128-byte units; buffer sizes in HMC shift units. */
688 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
689 rxq.qlen = info->ring_len;
691 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
697 rxq.rxmax = info->max_pkt_size;
698 rxq.tphrdesc_ena = 1;
699 rxq.tphwdesc_ena = 1;
705 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
706 if (status != I40E_SUCCESS)
709 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
/*
 * Handle I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the message (size,
 * queue-pair count, VSI id) and program each TX/RX queue pair's HMC
 * context.  Any validation or programming failure NACKs the request.
 */
715 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
718 struct i40e_virtchnl_vsi_queue_config_info *info;
719 struct i40e_virtchnl_queue_pair_info *pair;
720 uint16_t expected_msg_size;
723 if (msg_size < sizeof(*info)) {
724 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
730 if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
731 device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
732 vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
733 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
/* The payload is a header followed by num_queue_pairs pair records. */
738 expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
739 if (msg_size != expected_msg_size) {
740 device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
741 vf->vf_num, msg_size, expected_msg_size);
742 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
747 if (info->vsi_id != vf->vsi.vsi_num) {
748 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
749 vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
750 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
755 for (i = 0; i < info->num_queue_pairs; i++) {
756 pair = &info->qpair[i];
/* Each pair must target this VSI with matching, in-range TX/RX ids. */
758 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
759 pair->rxq.vsi_id != vf->vsi.vsi_num ||
760 pair->txq.queue_id != pair->rxq.queue_id ||
761 pair->txq.queue_id >= vf->vsi.num_queues) {
763 i40e_send_vf_nack(pf, vf,
764 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
768 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
769 i40e_send_vf_nack(pf, vf,
770 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
774 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
775 i40e_send_vf_nack(pf, vf,
776 I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
781 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/*
 * Link one queue into an interrupt cause chain: write the QINT_RQCTL or
 * QINT_TQCTL register for cur_queue so it points at the previous link
 * (*last_type/*last_queue), then advance the link state to cur_queue.
 */
785 ixl_vf_set_qctl(struct ixl_pf *pf,
786 const struct i40e_virtchnl_vector_map *vector,
787 enum i40e_queue_type cur_type, uint16_t cur_queue,
788 enum i40e_queue_type *last_type, uint16_t *last_queue)
790 uint32_t offset, qctl;
793 if (cur_type == I40E_QUEUE_TYPE_RX) {
794 offset = I40E_QINT_RQCTL(cur_queue);
795 itr_indx = vector->rxitr_idx;
797 offset = I40E_QINT_TQCTL(cur_queue);
798 itr_indx = vector->txitr_idx;
/* RQCTL and TQCTL share the same field layout, so RQCTL shifts are used. */
801 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
802 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
803 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
804 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
805 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
807 wr32(&pf->hw, offset, qctl);
/* This queue becomes the head of the chain for the next caller. */
809 *last_type = cur_type;
810 *last_queue = cur_queue;
/*
 * Build the interrupt linked list for one VF MSI-X vector: walk the TX
 * and RX queue bitmaps, chaining each queue via ixl_vf_set_qctl(), then
 * point the vector's LNKLST register at the head of the chain.
 */
814 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
815 const struct i40e_virtchnl_vector_map *vector)
819 enum i40e_queue_type type, last_type;
821 uint16_t rxq_map, txq_map, cur_queue, last_queue;
825 rxq_map = vector->rxq_map;
826 txq_map = vector->txq_map;
/* Start with an empty chain (end-of-list sentinel). */
828 last_queue = IXL_END_OF_INTR_LNKLST;
829 last_type = I40E_QUEUE_TYPE_RX;
832 * The datasheet says to optimize performance, RX queues and TX queues
833 * should be interleaved in the interrupt linked list, so we process
836 while ((rxq_map != 0) || (txq_map != 0)) {
838 qindex = ffs(txq_map) - 1;
839 type = I40E_QUEUE_TYPE_TX;
840 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
841 ixl_vf_set_qctl(pf, vector, type, cur_queue,
842 &last_type, &last_queue);
843 txq_map &= ~(1 << qindex);
847 qindex = ffs(rxq_map) - 1;
848 type = I40E_QUEUE_TYPE_RX;
849 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
850 ixl_vf_set_qctl(pf, vector, type, cur_queue,
851 &last_type, &last_queue);
852 rxq_map &= ~(1 << qindex);
/* Vector 0 uses LNKLST0; other vectors use the LNKLSTN array. */
856 if (vector->vector_id == 0)
857 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
859 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
862 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
863 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/*
 * Handle I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: validate every vector map
 * (vector/VSI ids, queue bitmaps in range, ITR indices) and program the
 * per-vector interrupt linked lists.
 */
869 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
872 struct i40e_virtchnl_irq_map_info *map;
873 struct i40e_virtchnl_vector_map *vector;
875 int i, largest_txq, largest_rxq;
879 if (msg_size < sizeof(*map)) {
880 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
886 if (map->num_vectors == 0) {
887 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
/* Payload must be exactly the header plus num_vectors map records. */
892 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
893 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
898 for (i = 0; i < map->num_vectors; i++) {
899 vector = &map->vecmap[i];
901 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
902 vector->vsi_id != vf->vsi.vsi_num) {
903 i40e_send_vf_nack(pf, vf,
904 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
/* Highest set bit in each bitmap must name a queue the VF owns. */
908 if (vector->rxq_map != 0) {
909 largest_rxq = fls(vector->rxq_map) - 1;
910 if (largest_rxq >= vf->vsi.num_queues) {
911 i40e_send_vf_nack(pf, vf,
912 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
918 if (vector->txq_map != 0) {
919 largest_txq = fls(vector->txq_map) - 1;
920 if (largest_txq >= vf->vsi.num_queues) {
921 i40e_send_vf_nack(pf, vf,
922 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
928 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
929 vector->txitr_idx > IXL_MAX_ITR_IDX) {
930 i40e_send_vf_nack(pf, vf,
931 I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
936 ixl_vf_config_vector(pf, vf, vector);
939 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
/*
 * Handle I40E_VIRTCHNL_OP_ENABLE_QUEUES: enable the TX and RX rings
 * selected in the VF's 32-bit queue bitmaps, skipping queues that were
 * never configured and warning on out-of-range or already-enabled rings.
 */
943 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
946 struct i40e_virtchnl_queue_select *select;
949 if (msg_size != sizeof(*select)) {
950 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
956 if (select->vsi_id != vf->vsi.vsi_num ||
957 select->rx_queues == 0 || select->tx_queues == 0) {
958 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
963 /* Enable TX rings selected by the VF */
964 for (int i = 0; i < 32; i++) {
965 if ((1 << i) & select->tx_queues) {
966 /* Warn if queue is out of VF allocation range */
967 if (i >= vf->vsi.num_queues) {
968 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
972 /* Skip this queue if it hasn't been configured */
973 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
975 /* Warn if this queue is already marked as enabled */
976 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
977 device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
980 error = ixl_enable_tx_ring(pf, &vf->qtag, i);
984 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
988 /* Enable RX rings selected by the VF */
989 for (int i = 0; i < 32; i++) {
990 if ((1 << i) & select->rx_queues) {
991 /* Warn if queue is out of VF allocation range */
992 if (i >= vf->vsi.num_queues) {
993 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
997 /* Skip this queue if it hasn't been configured */
998 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1000 /* Warn if this queue is already marked as enabled */
1001 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
1002 device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
1004 error = ixl_enable_rx_ring(pf, &vf->qtag, i);
1008 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
/* Any ring-enable failure above results in a NACK instead of an ACK. */
1013 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1018 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
/*
 * Handle I40E_VIRTCHNL_OP_DISABLE_QUEUES: mirror image of the enable
 * handler — disable the TX and RX rings selected in the VF's bitmaps.
 */
1022 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1023 void *msg, uint16_t msg_size)
1025 struct i40e_virtchnl_queue_select *select;
1028 if (msg_size != sizeof(*select)) {
1029 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1035 if (select->vsi_id != vf->vsi.vsi_num ||
1036 select->rx_queues == 0 || select->tx_queues == 0) {
1037 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1042 /* Disable TX rings selected by the VF */
1043 for (int i = 0; i < 32; i++) {
1044 if ((1 << i) & select->tx_queues) {
1045 /* Warn if queue is out of VF allocation range */
1046 if (i >= vf->vsi.num_queues) {
1047 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
1051 /* Skip this queue if it hasn't been configured */
1052 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1054 /* Warn if this queue is already marked as disabled */
1055 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1056 device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
1060 error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1064 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
/* Disable RX rings selected by the VF (comment fixed: was "Enable"). */
1068 
1069 for (int i = 0; i < 32; i++) {
1070 if ((1 << i) & select->rx_queues) {
1071 /* Warn if queue is out of VF allocation range */
1072 if (i >= vf->vsi.num_queues) {
1073 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1077 /* Skip this queue if it hasn't been configured */
1078 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1080 /* Warn if this queue is already marked as disabled */
1081 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1082 device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
1086 error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1090 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
/* Any ring-disable failure above results in a NACK instead of an ACK. */
1095 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1100 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
/* True if addr is the all-zero MAC address. */
1104 ixl_zero_mac(const uint8_t *addr)
1106 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1108 return (cmp_etheraddr(addr, zero));
/* True if addr is the broadcast MAC address. */
1112 ixl_bcast_mac(const uint8_t *addr)
1115 return (cmp_etheraddr(addr, ixl_bcast_addr));
/*
 * Validate a MAC filter request from a VF: reject the zero and broadcast
 * addresses, and — unless the VF has the set-MAC capability — any unicast
 * address other than the VF's assigned MAC.
 */
1119 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1122 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1126 * If the VF is not allowed to change its MAC address, don't let it
1127 * set a MAC filter for an address that is not a multicast address and
1128 * is not its assigned MAC.
1130 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1131 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
/*
 * Handle I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: validate the address list
 * (size, element count, VSI id, each address) before installing any
 * filter, so the operation is all-or-nothing.
 */
1138 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1141 struct i40e_virtchnl_ether_addr_list *addr_list;
1142 struct i40e_virtchnl_ether_addr *addr;
1143 struct ixl_vsi *vsi;
1145 size_t expected_size;
1149 if (msg_size < sizeof(*addr_list)) {
1150 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1156 expected_size = sizeof(*addr_list) +
1157 addr_list->num_elements * sizeof(*addr);
1159 if (addr_list->num_elements == 0 ||
1160 addr_list->vsi_id != vsi->vsi_num ||
1161 msg_size != expected_size) {
1162 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
/* First pass: validate every address before touching filter state. */
1167 for (i = 0; i < addr_list->num_elements; i++) {
1168 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1169 i40e_send_vf_nack(pf, vf,
1170 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
/* Second pass: install the filters. */
1175 for (i = 0; i < addr_list->num_elements; i++) {
1176 addr = &addr_list->list[i];
1177 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1180 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
1184 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1187 struct i40e_virtchnl_ether_addr_list *addr_list;
1188 struct i40e_virtchnl_ether_addr *addr;
1189 size_t expected_size;
1192 if (msg_size < sizeof(*addr_list)) {
1193 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1199 expected_size = sizeof(*addr_list) +
1200 addr_list->num_elements * sizeof(*addr);
1202 if (addr_list->num_elements == 0 ||
1203 addr_list->vsi_id != vf->vsi.vsi_num ||
1204 msg_size != expected_size) {
1205 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1210 for (i = 0; i < addr_list->num_elements; i++) {
1211 addr = &addr_list->list[i];
1212 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1213 i40e_send_vf_nack(pf, vf,
1214 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
1219 for (i = 0; i < addr_list->num_elements; i++) {
1220 addr = &addr_list->list[i];
1221 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1224 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
/*
 * Enable VLAN stripping on the VF's VSI by updating the VSI's VLAN
 * properties through the admin queue: PVLAN_MODE_ALL allows all VLAN
 * traffic, and EMOD_STR_BOTH strips the tag into the descriptor.
 * Returns the admin-queue status to the caller.
 */
1227 static enum i40e_status_code
1228 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1230 struct i40e_vsi_context vsi_ctx;
1232 vsi_ctx.seid = vf->vsi.seid;
/* Only the VLAN section of the context is marked valid for the update. */
1234 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1235 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1236 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1237 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1238 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/*
 * Handle a VIRTCHNL ADD_VLAN request: add MAC+VLAN filters for each
 * requested VLAN id on the VF's assigned MAC.
 *
 * Validates message size, VSI id, VF_FLAG_VLAN_CAP, and each VLAN id
 * (<= EVL_VLID_MASK) before enabling stripping or adding any filter.
 */
1242 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1245 struct i40e_virtchnl_vlan_filter_list *filter_list;
1246 enum i40e_status_code code;
1247 size_t expected_size;
1250 if (msg_size < sizeof(*filter_list)) {
1251 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* msg_size must exactly match the header plus num_elements VLAN ids. */
1257 expected_size = sizeof(*filter_list) +
1258 filter_list->num_elements * sizeof(uint16_t);
1259 if (filter_list->num_elements == 0 ||
1260 filter_list->vsi_id != vf->vsi.vsi_num ||
1261 msg_size != expected_size) {
1262 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* VF must have been granted VLAN capability by the administrator. */
1267 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1268 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* Every VLAN id must fit in the 12-bit VID field. */
1273 for (i = 0; i < filter_list->num_elements; i++) {
1274 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1275 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
/* Turn on VLAN stripping before installing the filters. */
1281 code = ixl_vf_enable_vlan_strip(pf, vf);
1282 if (code != I40E_SUCCESS) {
1283 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1287 for (i = 0; i < filter_list->num_elements; i++)
1288 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1290 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
1294 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1297 struct i40e_virtchnl_vlan_filter_list *filter_list;
1299 size_t expected_size;
1301 if (msg_size < sizeof(*filter_list)) {
1302 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1308 expected_size = sizeof(*filter_list) +
1309 filter_list->num_elements * sizeof(uint16_t);
1310 if (filter_list->num_elements == 0 ||
1311 filter_list->vsi_id != vf->vsi.vsi_num ||
1312 msg_size != expected_size) {
1313 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1318 for (i = 0; i < filter_list->num_elements; i++) {
1319 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1320 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1326 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1327 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1332 for (i = 0; i < filter_list->num_elements; i++)
1333 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1335 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
/*
 * Handle a VIRTCHNL CONFIG_PROMISCUOUS_MODE request.
 *
 * Requires VF_FLAG_PROMISC_CAP (administrator-granted) and a VSI id
 * matching the VF's own VSI, then sets unicast and multicast
 * promiscuous state independently from the request's flag bits via the
 * admin queue.  Either AQ failure is nacked with the AQ status code.
 */
1339 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1340 void *msg, uint16_t msg_size)
1342 struct i40e_virtchnl_promisc_info *info;
1343 enum i40e_status_code code;
/* Fixed-size message: exact size match required. */
1345 if (msg_size != sizeof(*info)) {
1346 i40e_send_vf_nack(pf, vf,
1347 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1351 if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1352 i40e_send_vf_nack(pf, vf,
1353 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
/* The VF may only change promiscuity on its own VSI. */
1358 if (info->vsi_id != vf->vsi.vsi_num) {
1359 i40e_send_vf_nack(pf, vf,
1360 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1364 code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
1365 info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1366 if (code != I40E_SUCCESS) {
1367 i40e_send_vf_nack(pf, vf,
1368 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1372 code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
1373 info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
1374 if (code != I40E_SUCCESS) {
1375 i40e_send_vf_nack(pf, vf,
1376 I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1380 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
/*
 * Handle a VIRTCHNL GET_STATS request: refresh the VF VSI's ethernet
 * statistics and return them in the reply payload.  The queue_select
 * message must be exactly sized and reference the VF's own VSI.
 */
1384 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1387 struct i40e_virtchnl_queue_select *queue;
1389 if (msg_size != sizeof(*queue)) {
1390 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1396 if (queue->vsi_id != vf->vsi.vsi_num) {
1397 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
/* Pull fresh counters from hardware before replying. */
1402 ixl_update_eth_stats(&vf->vsi);
1404 ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1405 I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
/*
 * Handle a VIRTCHNL CONFIG_RSS_KEY request: program the VF's RSS hash
 * key using the MAC-type-appropriate mechanism.
 *
 * On X722 the key is set via the admin queue: the first 40 bytes go in
 * standard_rss_key and any remainder (up to 12 more, hence the 52-byte
 * cap) in extended_hash_key.  On other MACs the key is written directly
 * to the per-VF VFQF_HKEY1 registers, 4 bytes per register.
 * NOTE(review): 52 and 40 are magic numbers here; named constants for
 * the standard/extended key sizes would be clearer — confirm against
 * i40e_aqc_get_set_rss_key_data's field sizes.
 */
1409 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1413 struct i40e_virtchnl_rss_key *key;
1414 struct i40e_aqc_get_set_rss_key_data key_data;
1415 enum i40e_status_code status;
1419 if (msg_size < sizeof(*key)) {
1420 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1427 if (key->key_len > 52) {
1428 device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1429 vf->vf_num, key->key_len, 52);
1430 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1435 if (key->vsi_id != vf->vsi.vsi_num) {
1436 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1437 vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1438 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1443 /* Fill out hash using MAC-dependent method */
1444 if (hw->mac.type == I40E_MAC_X722) {
1445 bzero(&key_data, sizeof(key_data));
1446 if (key->key_len <= 40)
1447 bcopy(key->key, key_data.standard_rss_key, key->key_len);
/* Longer keys spill past the 40-byte standard key into the extension. */
1449 bcopy(key->key, key_data.standard_rss_key, 40);
1450 bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1452 status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1454 device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1455 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1456 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1457 I40E_ERR_ADMIN_QUEUE_ERROR);
/* Non-X722: write the key 32 bits at a time into VFQF_HKEY1. */
1461 for (int i = 0; i < (key->key_len / 4); i++)
1462 i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1465 DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1466 vf->vf_num, key->key[0]);
1468 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
/*
 * Handle a VIRTCHNL CONFIG_RSS_LUT request: program the VF's RSS
 * lookup table.  On X722 the LUT is set through the admin queue; on
 * other MACs it is written 4 entries (one u32) at a time into the
 * per-VF VFQF_HLUT1 registers.  At most 64 entries are accepted.
 * NOTE(review): 64 is a magic number (per-VF LUT size) — a named
 * constant would be clearer.
 */
1472 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1476 struct i40e_virtchnl_rss_lut *lut;
1477 enum i40e_status_code status;
1481 if (msg_size < sizeof(*lut)) {
1482 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1489 if (lut->lut_entries > 64) {
1490 device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1491 vf->vf_num, lut->lut_entries, 64);
1492 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1497 if (lut->vsi_id != vf->vsi.vsi_num) {
1498 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1499 vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1500 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1505 /* Fill out LUT using MAC-dependent method */
1506 if (hw->mac.type == I40E_MAC_X722) {
1507 status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1509 device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1510 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1511 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1512 I40E_ERR_ADMIN_QUEUE_ERROR);
/* Non-X722: write the LUT 32 bits at a time into VFQF_HLUT1. */
1516 for (int i = 0; i < (lut->lut_entries / 4); i++)
1517 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1520 DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1521 vf->vf_num, lut->lut[0], lut->lut_entries);
1523 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
/*
 * Handle a VIRTCHNL SET_RSS_HENA request: program the VF's RSS hash
 * enable bits.  The 64-bit hena value is split across the two 32-bit
 * VFQF_HENA1 registers (low word in index 0, high word in index 1).
 */
1527 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1531 struct i40e_virtchnl_rss_hena *hena;
1535 if (msg_size < sizeof(*hena)) {
1536 i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
1544 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1545 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1547 DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1548 vf->vf_num, hena->hena);
1550 ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
/*
 * Dispatch an incoming VF admin-queue message to the appropriate
 * virtchnl opcode handler.
 *
 * The VF number is derived from the event descriptor (retval minus the
 * function's vf_base_id) and bounds-checked against pf->num_vfs before
 * indexing pf->vfs.  Messages from disabled VFs (stray traffic from a
 * previously destroyed VF) are silently dropped.  Unhandled or
 * superseded opcodes are nacked with I40E_ERR_NOT_IMPLEMENTED.
 */
1554 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1558 uint16_t vf_num, msg_size;
1561 vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1562 opcode = le32toh(event->desc.cookie_high)
/* Guard against out-of-range VF indices before touching pf->vfs[]. */
1564 if (vf_num >= pf->num_vfs) {
1565 device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1569 vf = &pf->vfs[vf_num];
1570 msg = event->msg_buf;
1571 msg_size = event->msg_len;
1573 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1574 "Got msg %s(%d) from%sVF-%d of size %d\n",
1575 ixl_vc_opcode_str(opcode), opcode,
1576 (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1579 /* This must be a stray msg from a previously destroyed VF. */
1580 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1584 case I40E_VIRTCHNL_OP_VERSION:
1585 ixl_vf_version_msg(pf, vf, msg, msg_size);
1587 case I40E_VIRTCHNL_OP_RESET_VF:
1588 ixl_vf_reset_msg(pf, vf, msg, msg_size);
1590 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1591 ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1593 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1594 ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1596 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1597 ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1599 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1600 ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1602 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1603 ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1605 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1606 ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1608 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1609 ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1611 case I40E_VIRTCHNL_OP_ADD_VLAN:
1612 ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1614 case I40E_VIRTCHNL_OP_DEL_VLAN:
1615 ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1617 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1618 ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1620 case I40E_VIRTCHNL_OP_GET_STATS:
1621 ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1623 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
1624 ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1626 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
1627 ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1629 case I40E_VIRTCHNL_OP_SET_RSS_HENA:
1630 ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1633 /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1634 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1635 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1637 i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1642 /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
1644 ixl_handle_vflr(void *arg, int pending)
1649 uint16_t global_vf_num;
1650 uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
/*
 * Scan every enabled VF's bit in the GLGEN_VFLRSTAT registers; for each
 * VF whose FLR-indication bit is set, write the bit back (W1C) to clear
 * it and reinitialize that VF's state.
 */
1657 for (i = 0; i < pf->num_vfs; i++) {
1658 global_vf_num = hw->func_caps.vf_base_id + i;
/* Skip slots that were never brought up. */
1661 if (!(vf->vf_flags & VF_FLAG_ENABLED))
1664 vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1665 vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1666 vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1667 if (vflrstat & vflrstat_mask) {
1668 wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1671 ixl_reinit_vf(pf, vf);
/* Re-arm the VFLR cause in ICR0 now that all pending FLRs are handled. */
1675 icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1676 icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1677 wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
/*
 * Map an i40e admin-queue error code to the closest errno(2) value so
 * admin-queue failures can be propagated through standard kernel
 * interfaces (e.g. the pci_iov init path).
 */
1684 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1688 case I40E_AQ_RC_EPERM:
1690 case I40E_AQ_RC_ENOENT:
1692 case I40E_AQ_RC_ESRCH:
1694 case I40E_AQ_RC_EINTR:
1696 case I40E_AQ_RC_EIO:
1698 case I40E_AQ_RC_ENXIO:
1700 case I40E_AQ_RC_E2BIG:
1702 case I40E_AQ_RC_EAGAIN:
1704 case I40E_AQ_RC_ENOMEM:
1706 case I40E_AQ_RC_EACCES:
1708 case I40E_AQ_RC_EFAULT:
1710 case I40E_AQ_RC_EBUSY:
1712 case I40E_AQ_RC_EEXIST:
1714 case I40E_AQ_RC_EINVAL:
1716 case I40E_AQ_RC_ENOTTY:
1718 case I40E_AQ_RC_ENOSPC:
1720 case I40E_AQ_RC_ENOSYS:
1722 case I40E_AQ_RC_ERANGE:
1724 case I40E_AQ_RC_EFLUSHED:
1725 return (EINVAL); /* No exact equivalent in errno.h */
1726 case I40E_AQ_RC_BAD_ADDR:
1728 case I40E_AQ_RC_EMODE:
1730 case I40E_AQ_RC_EFBIG:
/*
 * pci_iov init entry point: allocate per-VF state for num_vfs VFs,
 * initialize each VF's sysctl context, and create the floating VEB
 * that switches traffic between the PF and its VFs.  On VEB creation
 * failure the AQ error is translated to an errno and the VF array is
 * freed on the error path.
 */
1738 ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
1742 struct ixl_vsi *pf_vsi;
1743 enum i40e_status_code ret;
1746 pf = device_get_softc(dev);
/* M_NOWAIT allocation: NULL is a real possibility and is checked. */
1751 pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1754 if (pf->vfs == NULL) {
1759 for (i = 0; i < num_vfs; i++)
1760 sysctl_ctx_init(&pf->vfs[i].ctx);
1762 ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1763 1, FALSE, &pf->veb_seid, FALSE, NULL);
1764 if (ret != I40E_SUCCESS) {
1765 error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1766 device_printf(dev, "add_veb failed; code=%d error=%d", ret,
1771 ixl_enable_adminq(hw);
1773 pf->num_vfs = num_vfs;
/* Error path: release the VF array allocated above. */
1778 free(pf->vfs, M_IXL);
/*
 * pci_iov uninit entry point: tear down each VF's VSI and release its
 * queue allocation, delete the VEB, disable interrupts if the PF
 * interface is down, and free per-VF sysctl contexts.
 */
1785 ixl_iov_uninit(device_t dev)
1789 struct ixl_vsi *vsi;
1794 pf = device_get_softc(dev);
1800 for (i = 0; i < pf->num_vfs; i++) {
/* A zero seid means the VSI was never created for this VF. */
1801 if (pf->vfs[i].vsi.seid != 0)
1802 i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1803 ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1804 DDPRINTF(dev, "VF %d: %d released\n",
1805 i, pf->vfs[i].qtag.num_allocated);
1806 DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1809 if (pf->veb_seid != 0) {
1810 i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1814 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1815 ixl_disable_intr(vsi);
/* Stash the count before the lock is dropped below. */
1820 num_vfs = pf->num_vfs;
1826 /* Do this after the unlock as sysctl_ctx_free might sleep. */
1827 for (i = 0; i < num_vfs; i++)
1828 sysctl_ctx_free(&vfs[i].ctx);
/*
 * Reserve hardware queues for a VF from the PF's queue manager.
 * The requested count is clamped into [1, 16] (with a warning printed
 * for out-of-range values) before a scattered allocation is attempted.
 */
1833 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1835 device_t dev = pf->dev;
1838 /* Validate, and clamp value if invalid */
1839 if (num_queues < 1 || num_queues > 16)
1840 device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1841 num_queues, vf->vf_num);
1842 if (num_queues < 1) {
1843 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1845 } else if (num_queues > 16) {
1846 device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
/* Queues need not be contiguous; the qtag records what was granted. */
1849 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1851 device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1852 num_queues, vf->vf_num);
1856 DDPRINTF(dev, "VF %d: %d allocated, %d active",
1857 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1858 DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1864 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
1866 char sysctl_name[QUEUE_NAME_LEN];
1874 pf = device_get_softc(dev);
1875 vf = &pf->vfs[vfnum];
1881 vf->vf_flags = VF_FLAG_ENABLED;
1882 SLIST_INIT(&vf->vsi.ftl);
1884 /* Reserve queue allocation from PF */
1885 vf_num_queues = nvlist_get_number(params, "num-queues");
1886 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1890 error = ixl_vf_setup_vsi(pf, vf);
1894 if (nvlist_exists_binary(params, "mac-addr")) {
1895 mac = nvlist_get_binary(params, "mac-addr", &size);
1896 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1898 if (nvlist_get_bool(params, "allow-set-mac"))
1899 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1902 * If the administrator has not specified a MAC address then
1903 * we must allow the VF to choose one.
1905 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1907 if (nvlist_get_bool(params, "mac-anti-spoof"))
1908 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1910 if (nvlist_get_bool(params, "allow-promisc"))
1911 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1913 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1915 ixl_reset_vf(pf, vf);
1919 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1920 ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);