1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixl_pf_iov.h"
37 /* Private functions */
38 static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39 static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40 static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
42 static bool ixl_zero_mac(const uint8_t *addr);
43 static bool ixl_bcast_mac(const uint8_t *addr);
45 static int ixl_vc_opcode_level(uint16_t opcode);
47 static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
49 static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
50 static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51 static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
53 static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
54 static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
55 static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
56 static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57 static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
58 static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
59 static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
60 static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
61 static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62 static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63 static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
64 static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
65 static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
66 static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
67 enum i40e_queue_type *last_type, uint16_t *last_queue);
68 static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
69 static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72 static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
75 static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
76 static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77 static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78 static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79 static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
80 static int ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);
82 static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
85 * TODO: Move pieces of this into iflib and call the rest in a handler?
87 * e.g. ixl_if_iov_set_schema
89 * It's odd to do pci_iov_detach() there while doing pci_iov_attach()
/*
 * Build the per-VF configuration schema and register this PF with the
 * kernel PCI IOV framework via pci_iov_attach().
 * NOTE(review): this excerpt is missing lines (return type, braces, and
 * the iov_error declaration are not visible); only comments added here.
 */
93 ixl_initialize_sriov(struct ixl_pf *pf)
95 device_t dev = pf->dev;
96 struct i40e_hw *hw = &pf->hw;
97 nvlist_t *pf_schema, *vf_schema;
100 pf_schema = pci_iov_schema_alloc_node();
101 vf_schema = pci_iov_schema_alloc_node();
/* Per-VF knobs an admin can set when creating VFs */
102 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
103 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
104 IOV_SCHEMA_HASDEFAULT, TRUE);
105 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
106 IOV_SCHEMA_HASDEFAULT, FALSE);
107 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
108 IOV_SCHEMA_HASDEFAULT, FALSE);
/* Default queue count: one MSI-X vector is reserved (hence -1),
 * clamped to [1, IAVF_MAX_QUEUES] */
109 pci_iov_schema_add_uint16(vf_schema, "num-queues",
110 IOV_SCHEMA_HASDEFAULT,
111 max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES)));
113 iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
114 if (iov_error != 0) {
116 "Failed to initialize SR-IOV (error=%d)\n",
119 device_printf(dev, "SR-IOV ready\n");
124 * Allocate the VSI for a VF.
/*
 * Allocate and configure a hardware VSI for one VF through the admin
 * queue: fills in an i40e_vsi_context (switch/security/VLAN/queue-map
 * sections), adds the VSI, reads its params back, and zeroes its BW
 * limit.  Returns 0 or an errno derived from the last AQ status.
 */
127 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
132 struct i40e_vsi_context vsi_ctx;
134 enum i40e_status_code code;
140 vsi_ctx.pf_num = hw->pf_id;
141 vsi_ctx.uplink_seid = pf->veb_seid;
142 vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
143 vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
144 vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
146 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
/* Switch section: optionally allow VF-to-VF loopback on this VSI */
148 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
149 if (pf->enable_vf_loopback)
150 vsi_ctx.info.switch_id =
151 htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
/* Security section: MAC anti-spoof check if configured for this VF */
153 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
154 vsi_ctx.info.sec_flags = 0;
155 if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
156 vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
/* VLAN section: pass all VLANs, no stripping/insertion by default */
158 vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
159 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
160 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
162 vsi_ctx.info.valid_sections |=
163 htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
164 vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
166 /* XXX: Only scattered allocation is supported for VFs right now */
167 for (i = 0; i < vf->qtag.num_active; i++)
168 vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
/* Mark the remaining queue-map slots as unused */
169 for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
170 vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
/* TC0 gets all queues; queue count is encoded as log2 via fls() */
172 vsi_ctx.info.tc_mapping[0] = htole16(
173 (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
174 ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
176 code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
177 if (code != I40E_SUCCESS)
178 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
179 vf->vsi.seid = vsi_ctx.seid;
180 vf->vsi.vsi_num = vsi_ctx.vsi_number;
181 vf->vsi.num_rx_queues = vf->qtag.num_active;
182 vf->vsi.num_tx_queues = vf->qtag.num_active;
184 code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
185 if (code != I40E_SUCCESS)
186 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Disable any bandwidth limit on the VF VSI */
188 code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
189 if (code != I40E_SUCCESS) {
190 device_printf(dev, "Failed to disable BW limit: %d\n",
191 ixl_adminq_err_to_errno(hw->aq.asq_last_status));
192 return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
/* Cache the final VSI parameters for later use */
195 memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
/*
 * Allocate the VF's VSI, enable broadcast reception on it, and restore
 * the VF's previously configured MAC/VLAN filters.
 */
200 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
207 error = ixl_vf_alloc_vsi(pf, vf);
211 /* Let VF receive broadcast Ethernet frames */
212 error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
214 device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
215 /* Re-add VF's MAC/VLAN filters to its VSI */
216 ixl_reconfigure_filters(&vf->vsi);
/* Reset the filter add/del counters for the fresh VSI */
218 vf->vsi.hw_filters_add = 0;
219 vf->vsi.hw_filters_del = 0;
/*
 * Write one queue mapping entry into the VSI's VSILAN_QTABLE via a
 * read-modify-write of the shared register.
 */
225 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
232 * Two queues are mapped in a single register, so we have to do some
233 * gymnastics to convert the queue number into a register index and
/* Odd queue numbers occupy the upper field of the register */
237 shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
239 qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
240 qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
241 qtable |= val << shift;
242 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
/*
 * Program the VF's queue mappings: enable the QTABLE-based mapping,
 * enable LAN traffic for the VF, map each VF queue index to its PF
 * queue, and mark all remaining slots unused.
 */
246 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
255 * Contiguous mappings aren't actually supported by the hardware,
256 * so we have to use non-contiguous mappings.
258 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
259 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
261 /* Enable LAN traffic on this VF */
262 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
263 I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
265 /* Program index of each VF queue into PF queue space
266 * (This is only needed if QTABLE is enabled) */
267 for (i = 0; i < vf->vsi.num_tx_queues; i++) {
268 qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
269 I40E_VPLAN_QTABLE_QINDEX_SHIFT;
271 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
/* Mark the rest of the VF's QTABLE entries as unused */
273 for (; i < IXL_MAX_VSI_QUEUES; i++)
274 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
275 I40E_VPLAN_QTABLE_QINDEX_MASK);
277 /* Map queues allocated to VF to its VSI;
278 * This mapping matches the VF-wide mapping since the VF
279 * is only given a single VSI */
280 for (i = 0; i < vf->vsi.num_tx_queues; i++)
281 ixl_vf_map_vsi_queue(hw, vf, i,
282 ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
284 /* Set rest of VSI queues as unused. */
285 for (; i < IXL_MAX_VSI_QUEUES; i++)
286 ixl_vf_map_vsi_queue(hw, vf, i,
287 I40E_VSILAN_QTABLE_QINDEX_0_MASK);
/*
 * Delete the VF's VSI switch element via the admin queue.
 * NOTE(review): guard checks before the delete are not visible in this
 * excerpt (lines dropped).
 */
293 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
302 i40e_aq_delete_element(hw, vsi->seid, NULL);
/* Disable a VF queue interrupt by writing its VFINT dynamic-control
 * register with only CLEARPBA set (interrupt enable bit cleared). */
306 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
309 wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
/* Terminate the interrupt linked list for a VF vector by writing the
 * "end of list" first-queue index/type values. */
314 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
317 wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
318 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
/*
 * Tear down everything allocated for a VF: release its VSI, disable
 * and unregister every MSI-X vector (index 0 uses dedicated registers),
 * and zero the cached queue counts.
 */
323 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
326 uint32_t vfint_reg, vpint_reg;
331 ixl_vf_vsi_release(pf, &vf->vsi);
333 /* Index 0 has a special register. */
334 ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
336 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
337 vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num);
338 ixl_vf_disable_queue_intr(hw, vfint_reg);
341 /* Index 0 has a special register. */
342 ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
344 for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
345 vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
346 ixl_vf_unregister_intr(hw, vpint_reg);
349 vf->vsi.num_tx_queues = 0;
350 vf->vsi.num_rx_queues = 0;
/*
 * Poll the PCIe config-access registers until the VF reports no pending
 * transactions, or IXL_VF_RESET_TIMEOUT iterations elapse.
 * NOTE(review): the return statements are not visible in this excerpt.
 */
354 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
358 uint16_t global_vf_num;
362 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
/* Select the VF's PCIe device-status register via the CIAA window */
364 wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
365 (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
366 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
367 ciad = rd32(hw, I40E_PF_PCI_CIAD);
368 if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
/*
 * Trigger a software reset of the VF (set VFSWR in VPGEN_VFRTRIG) and
 * then reinitialize it via ixl_reinit_vf().
 */
377 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
384 ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);
386 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
387 vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
388 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
391 ixl_reinit_vf(pf, vf);
393 ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
/*
 * Complete a VF reset: flush PCIe transactions, wait for the hardware
 * reset-done bit, clear the reset trigger, disable rings, release and
 * re-create the VF's resources, then mark the VF active.
 */
397 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
400 uint32_t vfrstat, vfrtrig;
405 error = ixl_flush_pcie(pf, vf);
407 device_printf(pf->dev,
408 "Timed out waiting for PCIe activity to stop on VF-%d\n",
/* Wait for the reset-done (VFRD) indication */
411 for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
414 vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
415 if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
419 if (i == IXL_VF_RESET_TIMEOUT)
420 device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
/* Tell the VF (via its reset-status register) that reset finished */
422 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
/* De-assert the software reset trigger */
424 vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
425 vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
426 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
428 if (vf->vsi.seid != 0)
429 ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
430 ixl_pf_qmgr_clear_queue_flags(&vf->qtag);
432 ixl_vf_release_resources(pf, vf);
433 ixl_vf_setup_vsi(pf, vf);
434 ixl_vf_map_queues(pf, vf);
/* VF is fully set up again; advertise it as active */
436 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
/*
 * Map a virtchnl opcode to a debug verbosity level; GET_STATS is
 * special-cased (presumably because it fires periodically and would
 * flood the log at the default level — TODO confirm, body truncated).
 */
441 ixl_vc_opcode_level(uint16_t opcode)
444 case VIRTCHNL_OP_GET_STATS:
/*
 * Send a virtchnl message (opcode + status + optional payload) to a VF
 * through the admin queue, logging it at the opcode's debug level.
 */
452 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
453 enum i40e_status_code status, void *msg, uint16_t len)
/* AQ addressing uses the device-global VF id, not the per-PF index */
459 global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
461 I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
462 "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
463 ixl_vc_opcode_str(op), op, status, vf->vf_num);
465 i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
/* Send a success (ACK) response with no payload for the given opcode. */
469 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
472 ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
/*
 * Send an error (NACK) response for the given opcode, logging the
 * originating file:line of the caller (supplied by the i40e_send_vf_nack
 * wrapper macro) for debugging.
 */
476 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
477 enum i40e_status_code status, const char *file, int line)
481 "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
482 ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
483 status, vf->vf_num, file, line);
484 ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
/*
 * Handle VIRTCHNL_OP_VERSION: validate the message size, record the
 * VF's API minor version, and reply with the PF's supported version.
 */
488 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
491 struct virtchnl_version_info reply;
493 if (msg_size != sizeof(struct virtchnl_version_info)) {
494 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
/* Only the minor version is stored; the major is assumed compatible */
499 vf->version = ((struct virtchnl_version_info *)msg)->minor;
501 reply.major = VIRTCHNL_VERSION_MAJOR;
502 reply.minor = VIRTCHNL_VERSION_MINOR;
503 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
/*
 * Handle VIRTCHNL_OP_RESET_VF: reset the VF.  Per the virtchnl
 * protocol, no response message is sent.
 */
508 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
513 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
518 ixl_reset_vf(pf, vf);
520 /* No response to a reset message. */
/*
 * Handle VIRTCHNL_OP_GET_VF_RESOURCES: validate the message size for
 * the VF's API version (1.0 sends no payload, 1.1+ sends a 4-byte
 * capability request) and reply with the VF's VSI, queue, vector, and
 * capability information.
 */
524 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
527 struct virtchnl_vf_resource reply;
529 if ((vf->version == 0 && msg_size != 0) ||
530 (vf->version == 1 && msg_size != 4)) {
531 device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
532 " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
534 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
539 bzero(&reply, sizeof(reply));
/* 1.0 VFs get a fixed capability set; they cannot request caps */
541 if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
542 reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
543 VIRTCHNL_VF_OFFLOAD_RSS_REG |
544 VIRTCHNL_VF_OFFLOAD_VLAN;
546 /* Force VF RSS setup by PF in 1.1+ VFs */
547 reply.vf_cap_flags = *(u32 *)msg & (
548 VIRTCHNL_VF_OFFLOAD_L2 |
549 VIRTCHNL_VF_OFFLOAD_RSS_PF |
550 VIRTCHNL_VF_OFFLOAD_VLAN);
553 reply.num_queue_pairs = vf->vsi.num_tx_queues;
554 reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
555 reply.rss_key_size = 52;
556 reply.rss_lut_size = 64;
557 reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
558 reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
559 reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
560 memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
562 ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
563 I40E_SUCCESS, &reply, sizeof(reply));
/*
 * Program one VF TX queue's HMC context from the virtchnl queue info:
 * translate the VF-relative queue id to the PF/global queue number,
 * clear and set the LAN TX queue context, then bind the queue to the
 * VF in QTX_CTL.  Returns 0 on success.
 */
567 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
568 struct virtchnl_txq_info *info)
571 struct i40e_hmc_obj_txq txq;
572 uint16_t global_queue_num, global_vf_num;
573 enum i40e_status_code status;
577 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
578 global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
579 bzero(&txq, sizeof(txq));
581 DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
582 vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
584 status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
585 if (status != I40E_SUCCESS)
/* Ring base address is expressed in context base units */
588 txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
590 txq.head_wb_ena = info->headwb_enabled;
591 txq.head_wb_addr = info->dma_headwb_addr;
592 txq.qlen = info->ring_len;
593 txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
596 status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
597 if (status != I40E_SUCCESS)
/* Associate this PF queue with the owning VF */
600 qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
601 (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
602 (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
603 wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
606 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
/*
 * Program one VF RX queue's HMC context from the virtchnl queue info,
 * validating buffer and frame-size limits and optional header split.
 * Returns 0 on success.
 */
612 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
613 struct virtchnl_rxq_info *info)
616 struct i40e_hmc_obj_rxq rxq;
617 uint16_t global_queue_num;
618 enum i40e_status_code status;
621 global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
622 bzero(&rxq, sizeof(rxq));
624 DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
625 vf->vf_num, global_queue_num, info->queue_id);
/* Reject buffer/frame sizes outside what the hardware supports */
627 if (info->databuffer_size > IXL_VF_MAX_BUFFER)
630 if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
631 info->max_pkt_size < ETHER_MIN_LEN)
/* Optional header-split configuration */
634 if (info->splithdr_enabled) {
635 if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
638 rxq.hsplit_0 = info->rx_split_pos &
639 (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
640 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
641 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
642 I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
643 rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
648 status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
649 if (status != I40E_SUCCESS)
/* Ring base address is expressed in context base units */
652 rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
653 rxq.qlen = info->ring_len;
655 rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
661 rxq.rxmax = info->max_pkt_size;
662 rxq.tphrdesc_ena = 1;
663 rxq.tphwdesc_ena = 1;
669 status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
670 if (status != I40E_SUCCESS)
673 ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the variable-length
 * message (header + num_queue_pairs pair entries), check each pair's
 * VSI id and queue id, and program the TX/RX queue contexts.  Any
 * validation or programming failure NACKs and aborts the whole request.
 */
679 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
682 struct virtchnl_vsi_queue_config_info *info;
683 struct virtchnl_queue_pair_info *pair;
684 uint16_t expected_msg_size;
687 if (msg_size < sizeof(*info)) {
688 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
694 if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
695 device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
696 vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
697 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
/* The message must be exactly the size implied by num_queue_pairs */
702 expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
703 if (msg_size != expected_msg_size) {
704 device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
705 vf->vf_num, msg_size, expected_msg_size);
706 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
711 if (info->vsi_id != vf->vsi.vsi_num) {
712 device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
713 vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
714 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
719 for (i = 0; i < info->num_queue_pairs; i++) {
720 pair = &info->qpair[i];
/* TX and RX of a pair must target the same, in-range queue id */
722 if (pair->txq.vsi_id != vf->vsi.vsi_num ||
723 pair->rxq.vsi_id != vf->vsi.vsi_num ||
724 pair->txq.queue_id != pair->rxq.queue_id ||
725 pair->txq.queue_id >= vf->vsi.num_tx_queues) {
727 i40e_send_vf_nack(pf, vf,
728 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
732 if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
733 i40e_send_vf_nack(pf, vf,
734 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
738 if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
739 i40e_send_vf_nack(pf, vf,
740 VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
745 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
/*
 * Write one link in the interrupt linked list: program the given
 * queue's RQCTL/TQCTL register with the vector, ITR index, and the
 * previously linked queue (next-queue fields), then advance the
 * last_type/last_queue cursor to this queue.
 */
749 ixl_vf_set_qctl(struct ixl_pf *pf,
750 const struct virtchnl_vector_map *vector,
751 enum i40e_queue_type cur_type, uint16_t cur_queue,
752 enum i40e_queue_type *last_type, uint16_t *last_queue)
754 uint32_t offset, qctl;
757 if (cur_type == I40E_QUEUE_TYPE_RX) {
758 offset = I40E_QINT_RQCTL(cur_queue);
759 itr_indx = vector->rxitr_idx;
761 offset = I40E_QINT_TQCTL(cur_queue);
762 itr_indx = vector->txitr_idx;
/* RQCTL and TQCTL share the same field layout, so the RQCTL
 * shift/mask macros are used for both register types */
765 qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
766 (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
767 (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
768 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
769 (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
771 wr32(&pf->hw, offset, qctl);
773 *last_type = cur_type;
774 *last_queue = cur_queue;
/*
 * Build the interrupt linked list for one VF MSI-X vector from its
 * rxq_map/txq_map bitmaps, then write the list head into the vector's
 * VPINT_LNKLST register (vector 0 uses LNKLST0).
 */
778 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
779 const struct virtchnl_vector_map *vector)
783 enum i40e_queue_type type, last_type;
785 uint16_t rxq_map, txq_map, cur_queue, last_queue;
789 rxq_map = vector->rxq_map;
790 txq_map = vector->txq_map;
/* Start the list with the end-of-list sentinel */
792 last_queue = IXL_END_OF_INTR_LNKLST;
793 last_type = I40E_QUEUE_TYPE_RX;
796 * The datasheet says to optimize performance, RX queues and TX queues
797 * should be interleaved in the interrupt linked list, so we process
800 while ((rxq_map != 0) || (txq_map != 0)) {
802 qindex = ffs(txq_map) - 1;
803 type = I40E_QUEUE_TYPE_TX;
804 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
805 ixl_vf_set_qctl(pf, vector, type, cur_queue,
806 &last_type, &last_queue);
807 txq_map &= ~(1 << qindex);
811 qindex = ffs(rxq_map) - 1;
812 type = I40E_QUEUE_TYPE_RX;
813 cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
814 ixl_vf_set_qctl(pf, vector, type, cur_queue,
815 &last_type, &last_queue);
816 rxq_map &= ~(1 << qindex);
/* Vector 0 has a dedicated list-head register */
820 if (vector->vector_id == 0)
821 lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
823 lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
826 (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
827 (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
/*
 * Handle VIRTCHNL_OP_CONFIG_IRQ_MAP: validate the variable-length
 * message and each vector map (vector id, VSI id, queue bitmaps within
 * the VSI's queue counts, ITR indices in range), then program each
 * vector via ixl_vf_config_vector().
 */
833 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
836 struct virtchnl_irq_map_info *map;
837 struct virtchnl_vector_map *vector;
839 int i, largest_txq, largest_rxq;
843 if (msg_size < sizeof(*map)) {
844 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
850 if (map->num_vectors == 0) {
851 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
/* The message must be exactly the size implied by num_vectors */
856 if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
857 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
862 for (i = 0; i < map->num_vectors; i++) {
863 vector = &map->vecmap[i];
865 if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
866 vector->vsi_id != vf->vsi.vsi_num) {
867 i40e_send_vf_nack(pf, vf,
868 VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
/* Highest set bit in each bitmap must be a valid queue index */
872 if (vector->rxq_map != 0) {
873 largest_rxq = fls(vector->rxq_map) - 1;
874 if (largest_rxq >= vf->vsi.num_rx_queues) {
875 i40e_send_vf_nack(pf, vf,
876 VIRTCHNL_OP_CONFIG_IRQ_MAP,
882 if (vector->txq_map != 0) {
883 largest_txq = fls(vector->txq_map) - 1;
884 if (largest_txq >= vf->vsi.num_tx_queues) {
885 i40e_send_vf_nack(pf, vf,
886 VIRTCHNL_OP_CONFIG_IRQ_MAP,
892 if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
893 vector->txitr_idx > IXL_MAX_ITR_IDX) {
894 i40e_send_vf_nack(pf, vf,
895 VIRTCHNL_OP_CONFIG_IRQ_MAP,
900 ixl_vf_config_vector(pf, vf, vector);
903 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
/*
 * Handle VIRTCHNL_OP_ENABLE_QUEUES: validate the selection, then enable
 * each TX and RX ring whose bit is set in the VF's 32-bit queue masks,
 * skipping unconfigured queues and warning on out-of-range or
 * already-enabled ones.
 */
907 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
910 struct virtchnl_queue_select *select;
913 if (msg_size != sizeof(*select)) {
914 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
920 if (select->vsi_id != vf->vsi.vsi_num ||
921 select->rx_queues == 0 || select->tx_queues == 0) {
922 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
927 /* Enable TX rings selected by the VF */
928 for (int i = 0; i < 32; i++) {
929 if ((1 << i) & select->tx_queues) {
930 /* Warn if queue is out of VF allocation range */
931 if (i >= vf->vsi.num_tx_queues) {
932 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
936 /* Skip this queue if it hasn't been configured */
937 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
939 /* Warn if this queue is already marked as enabled */
940 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
941 ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",
944 error = ixl_enable_tx_ring(pf, &vf->qtag, i);
948 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
952 /* Enable RX rings selected by the VF */
953 for (int i = 0; i < 32; i++) {
954 if ((1 << i) & select->rx_queues) {
955 /* Warn if queue is out of VF allocation range */
956 if (i >= vf->vsi.num_rx_queues) {
957 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
961 /* Skip this queue if it hasn't been configured */
962 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
964 /* Warn if this queue is already marked as enabled */
965 if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
966 ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
968 error = ixl_enable_rx_ring(pf, &vf->qtag, i);
972 ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
/* A ring-enable failure falls through to a NACK */
977 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
982 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
/*
 * Handle VIRTCHNL_OP_DISABLE_QUEUES: mirror image of the enable
 * handler — validate the selection, then disable each selected TX and
 * RX ring, skipping unconfigured queues.
 */
986 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
987 void *msg, uint16_t msg_size)
989 struct virtchnl_queue_select *select;
992 if (msg_size != sizeof(*select)) {
993 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
999 if (select->vsi_id != vf->vsi.vsi_num ||
1000 select->rx_queues == 0 || select->tx_queues == 0) {
1001 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1006 /* Disable TX rings selected by the VF */
1007 for (int i = 0; i < 32; i++) {
1008 if ((1 << i) & select->tx_queues) {
1009 /* Warn if queue is out of VF allocation range */
1010 if (i >= vf->vsi.num_tx_queues) {
1011 device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
1015 /* Skip this queue if it hasn't been configured */
1016 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1018 /* Warn if this queue is already marked as disabled */
1019 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1020 ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",
1024 error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1028 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
/* NOTE(review): comment below says "Enable" but this loop disables
 * the selected RX rings */
1032 /* Enable RX rings selected by the VF */
1033 for (int i = 0; i < 32; i++) {
1034 if ((1 << i) & select->rx_queues) {
1035 /* Warn if queue is out of VF allocation range */
1036 if (i >= vf->vsi.num_rx_queues) {
1037 device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1041 /* Skip this queue if it hasn't been configured */
1042 if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1044 /* Warn if this queue is already marked as disabled */
1045 if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1046 ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",
1050 error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1054 ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
/* A ring-disable failure falls through to a NACK */
1059 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1064 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
1068 ixl_zero_mac(const uint8_t *addr)
1070 uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1072 return (cmp_etheraddr(addr, zero));
1076 ixl_bcast_mac(const uint8_t *addr)
1078 static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
1079 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1081 return (cmp_etheraddr(addr, ixl_bcast_addr));
/*
 * Validate a MAC address a VF wants to filter on: reject zero and
 * broadcast addresses, and — unless the VF may set its own MAC —
 * reject unicast addresses other than the VF's assigned MAC.
 */
1085 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1088 if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1092 * If the VF is not allowed to change its MAC address, don't let it
1093 * set a MAC filter for an address that is not a multicast address and
1094 * is not its assigned MAC.
1096 if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1097 !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
/*
 * Handle VIRTCHNL_OP_ADD_ETH_ADDR: validate the variable-length
 * address list and every address in it, then add a MAC filter (any
 * VLAN) for each address and ACK.
 */
1104 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1107 struct virtchnl_ether_addr_list *addr_list;
1108 struct virtchnl_ether_addr *addr;
1109 struct ixl_vsi *vsi;
1111 size_t expected_size;
1115 if (msg_size < sizeof(*addr_list)) {
1116 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1122 expected_size = sizeof(*addr_list) +
1123 addr_list->num_elements * sizeof(*addr);
1125 if (addr_list->num_elements == 0 ||
1126 addr_list->vsi_id != vsi->vsi_num ||
1127 msg_size != expected_size) {
1128 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
/* Validate every address before adding any filters */
1133 for (i = 0; i < addr_list->num_elements; i++) {
1134 if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1135 i40e_send_vf_nack(pf, vf,
1136 VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
1141 for (i = 0; i < addr_list->num_elements; i++) {
1142 addr = &addr_list->list[i];
1143 ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1146 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
1150 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1153 struct virtchnl_ether_addr_list *addr_list;
1154 struct virtchnl_ether_addr *addr;
1155 size_t expected_size;
1158 if (msg_size < sizeof(*addr_list)) {
1159 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1165 expected_size = sizeof(*addr_list) +
1166 addr_list->num_elements * sizeof(*addr);
1168 if (addr_list->num_elements == 0 ||
1169 addr_list->vsi_id != vf->vsi.vsi_num ||
1170 msg_size != expected_size) {
1171 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1176 for (i = 0; i < addr_list->num_elements; i++) {
1177 addr = &addr_list->list[i];
1178 if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1179 i40e_send_vf_nack(pf, vf,
1180 VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
1185 for (i = 0; i < addr_list->num_elements; i++) {
1186 addr = &addr_list->list[i];
1187 ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1190 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
/*
 * Enable VLAN stripping on the VF's VSI by updating only the VLAN
 * section of the VSI parameters (pass all VLANs, strip on RX into both
 * descriptor fields).
 */
static enum i40e_status_code
1194 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1196 struct i40e_vsi_context vsi_ctx;
1198 vsi_ctx.seid = vf->vsi.seid;
1200 bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1201 vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1202 vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1203 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1204 return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
/*
 * Handle VIRTCHNL_OP_ADD_VLAN: validate the variable-length VLAN id
 * list and the VF's VLAN capability, enable VLAN stripping on the VSI,
 * then add a filter for the VF's MAC on each requested VLAN and ACK.
 */
1208 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1211 struct virtchnl_vlan_filter_list *filter_list;
1212 enum i40e_status_code code;
1213 size_t expected_size;
1216 if (msg_size < sizeof(*filter_list)) {
1217 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1223 expected_size = sizeof(*filter_list) +
1224 filter_list->num_elements * sizeof(uint16_t);
1225 if (filter_list->num_elements == 0 ||
1226 filter_list->vsi_id != vf->vsi.vsi_num ||
1227 msg_size != expected_size) {
1228 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1233 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1234 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
/* Validate every VLAN id before adding any filters */
1239 for (i = 0; i < filter_list->num_elements; i++) {
1240 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1241 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1247 code = ixl_vf_enable_vlan_strip(pf, vf);
1248 if (code != I40E_SUCCESS) {
1249 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1253 for (i = 0; i < filter_list->num_elements; i++)
1254 ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1256 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
1260 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1263 struct virtchnl_vlan_filter_list *filter_list;
1265 size_t expected_size;
1267 if (msg_size < sizeof(*filter_list)) {
1268 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1274 expected_size = sizeof(*filter_list) +
1275 filter_list->num_elements * sizeof(uint16_t);
1276 if (filter_list->num_elements == 0 ||
1277 filter_list->vsi_id != vf->vsi.vsi_num ||
1278 msg_size != expected_size) {
1279 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1284 for (i = 0; i < filter_list->num_elements; i++) {
1285 if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1286 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1292 if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1293 i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1298 for (i = 0; i < filter_list->num_elements; i++)
1299 ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1301 ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
/*
 * Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE from a VF: program unicast
 * and multicast promiscuous mode on the VF's VSI according to the flags
 * in 'msg'.  VFs without the promisc capability are acked without any
 * hardware change.
 */
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
	struct virtchnl_promisc_info *info;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code code;

	/* Fixed-size message: an exact size match is required. */
	if (msg_size != sizeof(*info)) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);

	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		 * Do the same thing as the Linux PF driver -- lie to the VF
		 * by acking the request even though promiscuous mode was not
		 * actually enabled for it.
		ixl_send_vf_ack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);

	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);

	/* Program unicast promiscuous mode; nack the VF on AQ failure. */
	code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);

	/* Program multicast promiscuous mode likewise. */
	code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
/*
 * Handle VIRTCHNL_OP_GET_STATS from a VF: validate the queue-select
 * message, refresh the VSI's ethernet statistics, and send them back.
 */
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_queue_select *queue;

	/* Fixed-size message: an exact size match is required. */
	if (msg_size != sizeof(*queue)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,

	if (queue->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,

	/* Pull fresh counters from hardware before replying. */
	ixl_update_eth_stats(&vf->vsi);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
/*
 * Handle VIRTCHNL_OP_CONFIG_RSS_KEY from a VF: validate and program the
 * VF-supplied RSS hash key.  X722 parts set the key via the admin queue;
 * other MACs write the per-VF key registers directly.
 */
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_rss_key *key;
	struct i40e_aqc_get_set_rss_key_data key_data;
	enum i40e_status_code status;

	/* Message must be at least as large as the fixed-size header. */
	if (msg_size < sizeof(*key)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,

	/* 52 == 40-byte standard RSS key + 12-byte extended hash key. */
	if (key->key_len > 52) {
		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
		    vf->vf_num, key->key_len, 52);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,

	if (key->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,

	/* Fill out hash using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		bzero(&key_data, sizeof(key_data));
		if (key->key_len <= 40)
			bcopy(key->key, key_data.standard_rss_key, key->key_len);
			/* Longer keys split across standard + extended fields. */
			bcopy(key->key, key_data.standard_rss_key, 40);
			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
		device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_ADMIN_QUEUE_ERROR);

	/* Non-X722: write the key one 32-bit register at a time. */
	for (int i = 0; i < (key->key_len / 4); i++)
		i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);

	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
	    vf->vf_num, key->key[0]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
/*
 * Handle VIRTCHNL_OP_CONFIG_RSS_LUT from a VF: validate and program the
 * VF-supplied RSS lookup table.  X722 parts set the LUT via the admin
 * queue; other MACs write the per-VF LUT registers directly.
 */
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_rss_lut *lut;
	enum i40e_status_code status;

	/* Message must be at least as large as the fixed-size header. */
	if (msg_size < sizeof(*lut)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,

	/* 64 == maximum number of LUT entries for a VF. */
	if (lut->lut_entries > 64) {
		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
		    vf->vf_num, lut->lut_entries, 64);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,

	if (lut->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,

	/* Fill out LUT using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
		device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_ADMIN_QUEUE_ERROR);

	/* Non-X722: write the LUT four entries (one u32) per register. */
	for (int i = 0; i < (lut->lut_entries / 4); i++)
		i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);

	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
	    vf->vf_num, lut->lut[0], lut->lut_entries);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
/*
 * Handle VIRTCHNL_OP_SET_RSS_HENA from a VF: program the 64-bit hash
 * enable (HENA) value into the VF's pair of 32-bit HENA registers.
 */
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
	struct virtchnl_rss_hena *hena;

	/* Message must be at least as large as the fixed-size struct. */
	if (msg_size < sizeof(*hena)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,

	/* Low half, then high half, of the 64-bit HENA value. */
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));

	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
	    vf->vf_num, hena->hena);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
/*
 * Send a VIRTCHNL_OP_EVENT link-change notification to 'vf' carrying the
 * PF's current link status and speed.
 */
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
	struct virtchnl_pf_event event;

	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.severity = PF_EVENT_SEVERITY_INFO;
	/* Mirror the PF VSI's link state into the VF-visible event. */
	event.event_data.link_event.link_status = pf->vsi.link_active;
	event.event_data.link_event.link_speed =
	    (enum virtchnl_link_speed)hw->phy.link_info.link_speed;

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
/* Notify every configured VF of the PF's current link state. */
ixl_broadcast_link_state(struct ixl_pf *pf)

	for (i = 0; i < pf->num_vfs; i++)
		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
/*
 * Dispatch a virtchnl message received from a VF over the PF admin queue.
 * The VF number is recovered from the AQ descriptor, validated against the
 * configured VF count, and the message is routed to the opcode-specific
 * handler.  Messages from disabled VFs are dropped as stray.
 */
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
	uint16_t vf_num, msg_size;

	/* desc.retval carries the absolute VF id; convert to a PF-relative index. */
	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from%sVF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode,
	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",

	/* This must be a stray msg from a previously destroyed VF. */
	if (!(vf->vf_flags & VF_FLAG_ENABLED))

	case VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
	case VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		/* Unknown/unsupported opcodes are nacked rather than ignored. */
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
ixl_handle_vflr(struct ixl_pf *pf)
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;

	ixl_dbg_iov(pf, "%s: begin\n", __func__);

	/* Re-enable VFLR interrupt cause so driver doesn't miss a
	 * reset interrupt for another VF */
	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);

	/* Scan every VF's VFLR status bit and reinit those that were reset. */
	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		/* Skip VFs that were never brought up. */
		if (!(vf->vf_flags & VF_FLAG_ENABLED))

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			/* Write-1-to-clear acknowledges the FLR status bit. */
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),

			ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
			ixl_reinit_vf(pf, vf);
			ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
/*
 * Translate an i40e admin queue error code into the closest standard
 * errno value.
 */
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
	case I40E_AQ_RC_EPERM:
	case I40E_AQ_RC_ENOENT:
	case I40E_AQ_RC_ESRCH:
	case I40E_AQ_RC_EINTR:
	case I40E_AQ_RC_EIO:
	case I40E_AQ_RC_ENXIO:
	case I40E_AQ_RC_E2BIG:
	case I40E_AQ_RC_EAGAIN:
	case I40E_AQ_RC_ENOMEM:
	case I40E_AQ_RC_EACCES:
	case I40E_AQ_RC_EFAULT:
	case I40E_AQ_RC_EBUSY:
	case I40E_AQ_RC_EEXIST:
	case I40E_AQ_RC_EINVAL:
	case I40E_AQ_RC_ENOTTY:
	case I40E_AQ_RC_ENOSPC:
	case I40E_AQ_RC_ENOSYS:
	case I40E_AQ_RC_ERANGE:
	case I40E_AQ_RC_EFLUSHED:
		return (EINVAL); /* No exact equivalent in errno.h */
	case I40E_AQ_RC_BAD_ADDR:
	case I40E_AQ_RC_EMODE:
	case I40E_AQ_RC_EFBIG:
/*
 * Enable or disable local (VEB) loopback on the PF's main VSI by updating
 * the VSI's switch properties.  Loopback lets traffic between the PF and
 * its VFs be switched locally when a VEB is in use.
 */
ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_vsi_context ctxt;

	memset(&ctxt, 0, sizeof(ctxt));

	ctxt.seid = vsi->seid;
	/* Use the VEB as the uplink when one has been created. */
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.connection_type = IXL_VSI_DATA_PORT;

	/* Only the switch section of the context is being updated. */
	ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id = (enable) ?
	    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;

	/* error is set to 0 on success */
	error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", error, hw->aq.asq_last_status);
/*
 * iflib SR-IOV initialization handler: allocate the per-VF state array,
 * create the VEB that will switch VF traffic, optionally enable PF VSI
 * loopback, and re-establish the PF's MAC filters (adding a VEB restores
 * the default hardware filters).
 */
ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
	struct ixl_pf *pf = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixl_vsi *pf_vsi;
	enum i40e_status_code ret;

	/* M_NOWAIT: called from a context where sleeping is not allowed. */
	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
	if (pf->vfs == NULL) {

	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_init(&pf->vfs[i].ctx);

	/*
	 * Add the VEB and ...
	 * - do nothing: VEPA mode
	 * - enable loopback mode on connected VSIs: VEB mode
	 */
	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
	    1, FALSE, &pf->veb_seid, FALSE, NULL);
	if (ret != I40E_SUCCESS) {
		error = hw->aq.asq_last_status;
		device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
		    i40e_stat_str(hw, ret), i40e_aq_str(hw, error));

	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, true);

	/*
	 * Adding a VEB brings back the default MAC filter(s). Remove them,
	 * and let the driver add the proper filters back.
	 */
	ixl_del_default_hw_filters(pf_vsi);
	ixl_reconfigure_filters(pf_vsi);

	pf->num_vfs = num_vfs;

	/* NOTE(review): reached only on the error path — frees the VF array. */
	free(pf->vfs, M_IXL);
/*
 * iflib SR-IOV teardown handler: delete each VF's VSI element, release its
 * queue allocation and MAC filters, remove the VEB, reset PF loopback, and
 * free the per-VF sysctl contexts and state array.
 */
ixl_if_iov_uninit(if_ctx_t ctx)
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi;

	for (i = 0; i < pf->num_vfs; i++) {
		/* A zero seid means the VSI was never created in hardware. */
		if (pf->vfs[i].vsi.seid != 0)
			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
		ixl_free_mac_filters(&pf->vfs[i].vsi);
		ixl_dbg_iov(pf, "VF %d: %d released\n",
		    i, pf->vfs[i].qtag.num_allocated);
		ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));

	if (pf->veb_seid != 0) {
		i40e_aq_delete_element(hw, pf->veb_seid, NULL);

	/* Reset PF VSI loopback mode */
	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, false);

	num_vfs = pf->num_vfs;

	/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_free(&vfs[i].ctx);
1880 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1882 device_t dev = pf->dev;
1885 /* Validate, and clamp value if invalid */
1886 if (num_queues < 1 || num_queues > 16)
1887 device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1888 num_queues, vf->vf_num);
1889 if (num_queues < 1) {
1890 device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1892 } else if (num_queues > IAVF_MAX_QUEUES) {
1893 device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
1894 num_queues = IAVF_MAX_QUEUES;
1896 error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1898 device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1899 num_queues, vf->vf_num);
1903 ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
1904 vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1905 ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1911 ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
1913 struct ixl_pf *pf = iflib_get_softc(ctx);
1914 device_t dev = pf->dev;
1915 char sysctl_name[QUEUE_NAME_LEN];
1922 vf = &pf->vfs[vfnum];
1925 vf->vf_flags = VF_FLAG_ENABLED;
1926 SLIST_INIT(&vf->vsi.ftl);
1928 /* Reserve queue allocation from PF */
1929 vf_num_queues = nvlist_get_number(params, "num-queues");
1930 error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1934 error = ixl_vf_setup_vsi(pf, vf);
1938 if (nvlist_exists_binary(params, "mac-addr")) {
1939 mac = nvlist_get_binary(params, "mac-addr", &size);
1940 bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1942 if (nvlist_get_bool(params, "allow-set-mac"))
1943 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1946 * If the administrator has not specified a MAC address then
1947 * we must allow the VF to choose one.
1949 vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1951 if (nvlist_get_bool(params, "mac-anti-spoof"))
1952 vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1954 if (nvlist_get_bool(params, "allow-promisc"))
1955 vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1957 vf->vf_flags |= VF_FLAG_VLAN_CAP;
1959 /* VF needs to be reset before it can be used */
1960 ixl_reset_vf(pf, vf);
1963 snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1964 ixl_add_vsi_sysctls(dev, &vf->vsi, &vf->ctx, sysctl_name);