/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "pcics_reg_driver.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 fw_return_code);
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_CLOSE",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_COALESCE_UPDATE",
	"CHANNEL_TLV_QID",
	"CHANNEL_TLV_COALESCE_READ",
	"CHANNEL_TLV_MAX",
};
static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= ECORE_QCID_LEGACY_VF_CID;

	return legacy;
}
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 fp_minor;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
	u32 i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid &&
		    !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return OSAL_NULL;
}
enum ecore_iov_validate_q_mode {
	ECORE_IOV_VALIDATE_Q_NA,
	ECORE_IOV_VALIDATE_Q_ENABLE,
	ECORE_IOV_VALIDATE_Q_DISABLE,
};

static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
					  u16 qid,
					  enum ecore_iov_validate_q_mode mode,
					  bool b_is_tx)
{
	u32 i;

	if (mode == ECORE_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct ecore_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (p_qcid->p_cid == OSAL_NULL)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		/* Found. It's enabled. */
		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
}
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

/* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  false))
			return true;

	return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  true))
			return true;

	return false;
}
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);
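	/* Illustrative sketch (not driver code): a VF-side reader would
	 * validate a snapshot of the bulletin the same way - CRC taken over
	 * everything past the crc field itself:
	 *
	 *	crc = OSAL_CRC32(0, (u8 *)copy + crc_size,
	 *			 bulletin_size - crc_size);
	 *	if (crc == copy->crc)
	 *		;// snapshot is consistent, safe to consume
	 *
	 * The version counter bumped above lets the VF notice that a newer
	 * image has been posted since its last read.
	 */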
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id,
		   p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_CTRL,
				  &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF,
				  &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_NUM_VF,
				  &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET,
				  &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE,
				  &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID,
				  &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE,
				   &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_CAP,
				   &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev,
				  pos + PCI_SRIOV_FUNC_LINK,
				  &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false, "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
	}

	return ECORE_SUCCESS;
}
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u16 idx = 0;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn, "ecore_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct ecore_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
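		/* Worked example (illustrative values only): if the PF's
		 * opaque_fid has low byte 0x00 and abs_vf_id is 5, the VF's
		 * opaque_fid becomes 0x00 | (5 << 8) == 0x0500 - the VF
		 * number rides in the high byte.
		 */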
		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate bulletin boards (per-VF) */
	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				    ecore_sriov_eqe_event);

	return ecore_iov_allocate_vfdb(p_hwfn);
}
void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
		p_hwfn->pf_iov_info = OSAL_NULL;
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
	p_dev->p_iov_info = OSAL_NULL;
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when its available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, false,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}
	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		p_dev->p_iov_info = OSAL_NULL;
		return ECORE_SUCCESS;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device,
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */
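	/* Worked example (illustrative numbers only): with ARI, abs_pf_id 0
	 * and offset 16 give first_vf_in_pf = 16 + 0 - 16 = 0, i.e. this
	 * PF's VFs start at engine-VF 0. An offset of (256 - pf_id) or more
	 * can only come from the non-ARI layout, which is what the check
	 * below keys on before falling back to the -256 arithmetic.
	 */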
	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}
static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
}
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id,
				 u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}
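/* Worked example (illustrative): each 32-bit CLR register covers 32 VFs,
 * so abs_vfid 37 addresses the second register ((37 >> 5) * 4 == 4 bytes
 * past _31_0_CLR) and writes 1 << (37 & 0x1f) == 1 << 5 to clear that
 * VF's error bit.
 */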
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}
static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static enum _ecore_status_t
ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u8 abs_vf_id,
				u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* If client overrides this, don't do anything */
	if (p_hwfn->p_dev->b_dont_override_vf_msix)
		return ECORE_SUCCESS;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_for_each_vf(p_hwfn, i) {
			struct ecore_vf_info *p_vf;

			p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = OSAL_MAX_T(u8, current_max,
						 p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
						abs_vf_id, num_sbs);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
						       struct ecore_ptt *p_ptt,
						       struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					     vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}
/**
 * @brief ecore_iov_config_perm_table - configure the permission zone table.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
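/* Illustrative encoding (not extra driver logic): enabling the zone for
 * abs_vf_id 0x12 writes 0x112 - bit 8 is the Valid bit and bits [7:0]
 * carry the VF number, matching the {Valid, VF[7:0]} layout above and
 * the 'val' computed in the function below.
 */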
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf,
					u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues =
		(u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = ecore_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~ECORE_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		ecore_wr(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY +
			 sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
					p_hwfn->rel_pf_id,
					vf->abs_vf_id, 1);

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    p_block->igu_sb_id * sizeof(u64), 2,
				    OSAL_NULL /* default parameters */);
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}
/**
 * @brief The function invalidates all the VF entries,
 *        technically this isn't required, but added for
 *        cleanliness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
{
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	u16 qid, num_irqs;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return ECORE_INVAL;
	}

	/* Perform sanity checking on the requested vport/rss */
	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
			  p_params->rel_vf_id, p_params->vport_id);
		return ECORE_INVAL;
	}

	if ((p_params->num_queues > 1) &&
	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
			  p_params->rel_vf_id, p_params->rss_eng_id);
		return ECORE_INVAL;
	}

	/* TODO - remove this once we get confidence of change */
	if (!p_params->vport_id) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
			  p_params->rel_vf_id);
	}

	vf->vport_id = p_params->vport_id;
	vf->rss_eng_id = p_params->rss_eng_id;

	/* Since it's possible to relocate SBs, it's a bit difficult to check
	 * things here. Simply check whether the index falls in the range
	 * belonging to the PF.
	 */
	for (i = 0; i < p_params->num_queues; i++) {
		qid = p_params->req_rx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_irqs);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin.
	 */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(link_caps));
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
		vf->b_init = true;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
			(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
	}

	return rc;
}
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
			~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

	return ECORE_SUCCESS;
}
static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf,
					 u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/*WARN(expected_tlv != vf->op_current,
	 * "lock mismatch: expected %s found %s",
	 * channel_tlvs_string[expected_tlv],
	 * channel_tlvs_string[vf->op_current]);
	 */

	/* unlock the channel */
	/* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */

	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the unlocking op */
	/* vf->op_current = CHANNEL_TLV_NONE;*/
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
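/* Usage sketch (illustrative only; it mirrors how the reply helpers later
 * in this file chain TLVs, and all names are taken from this file):
 *
 *	u8 *offset = (u8 *)mbx->reply_virt;
 *	struct pfvf_def_resp_tlv *resp;
 *
 *	resp = ecore_add_tlv(&offset, CHANNEL_TLV_VPORT_UPDATE,
 *			     sizeof(*resp));
 *	ecore_add_tlv(&offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *
 * Each call stamps {type, length} at *offset, advances the offset by
 * 'length', and returns the start of the TLV just added so the caller
 * can fill in the body afterwards.
 */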
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header*/
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
#ifdef CONFIG_ECORE_SW_CHANNEL
				    u16 length,
#else
				    u16 OSAL_UNUSED length,
#endif
				    u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
		length + sizeof(struct channel_list_end_tlv);

	if (!p_vf->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id),
	       1);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}
static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
				     size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(i),
			   resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(&mbx->offset, type, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

struct ecore_public_vf_info *
ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			     u16 relative_vf_id,
			     bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = OSAL_NULL;
		}
	}

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
/* Returns either 0, or log(size) */
static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}
static void
ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *p_vf,
				   struct vf_pf_resc_request *p_req,
				   struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
		     DB_ADDR_VF(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (ECORE_IS_CMT(p_hwfn->p_dev))
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
					      (u8)(bar_size / db_size));
}
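/* Worked example (hypothetical numbers, for illustration only): if the VF
 * mapped a 128KB doorbell bar (bar_size = 1 << 17) and db_size works out
 * to 4KB, then 131072 / 4096 == 32 < 256, so num_cids is clamped to 32 -
 * guaranteeing every granted CID has a doorbell inside the bar.
 */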
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	 * versatility here.
	 */
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if (ECORE_IS_CMT(p_hwfn->p_dev) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
								 p_ptt);

	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field anyway.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}
static enum _ecore_status_t __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
						     struct ecore_vf_info *p_vf,
						     bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n",
			   val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static enum _ecore_status_t ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
							       struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true, "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan,
				  p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf,
				     u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/*TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
2003 static enum _ecore_status_t
2004 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
2005 struct ecore_vf_info *p_vf,
2008 enum _ecore_status_t rc = ECORE_SUCCESS;
2009 struct ecore_filter_ucast filter;
2011 if (!p_vf->vport_instance)
2014 if (events & (1 << MAC_ADDR_FORCED)) {
2015 /* Since there's no way [currently] of removing the MAC,
2016 * we can always assume this means we need to force it.
2018 OSAL_MEMSET(&filter, 0, sizeof(filter));
2019 filter.type = ECORE_FILTER_MAC;
2020 filter.opcode = ECORE_FILTER_REPLACE;
2021 filter.is_rx_filter = 1;
2022 filter.is_tx_filter = 1;
2023 filter.vport_to_add_to = p_vf->vport_id;
2024 OSAL_MEMCPY(filter.mac,
2025 p_vf->bulletin.p_virt->mac,
2028 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2030 ECORE_SPQ_MODE_CB, OSAL_NULL);
2032 DP_NOTICE(p_hwfn, true,
2033 "PF failed to configure MAC for VF\n");
2037 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
2040 if (events & (1 << VLAN_ADDR_FORCED)) {
2041 struct ecore_sp_vport_update_params vport_update;
2045 OSAL_MEMSET(&filter, 0, sizeof(filter));
2046 filter.type = ECORE_FILTER_VLAN;
2047 filter.is_rx_filter = 1;
2048 filter.is_tx_filter = 1;
2049 filter.vport_to_add_to = p_vf->vport_id;
2050 filter.vlan = p_vf->bulletin.p_virt->pvid;
2051 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
2054 /* Send the ramrod */
2055 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2057 ECORE_SPQ_MODE_CB, OSAL_NULL);
2059 DP_NOTICE(p_hwfn, true,
2060 "PF failed to configure VLAN for VF\n");
2064 /* Update the default-vlan & silent vlan stripping */
2065 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
2066 vport_update.opaque_fid = p_vf->opaque_fid;
2067 vport_update.vport_id = p_vf->vport_id;
2068 vport_update.update_default_vlan_enable_flg = 1;
2069 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
2070 vport_update.update_default_vlan_flg = 1;
2071 vport_update.default_vlan = filter.vlan;
2073 vport_update.update_inner_vlan_removal_flg = 1;
2074 removal = filter.vlan ?
2075 1 : p_vf->shadow_config.inner_vlan_removal;
2076 vport_update.inner_vlan_removal_flg = removal;
2077 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
2078 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
2079 ECORE_SPQ_MODE_EBLOCK,
2082 DP_NOTICE(p_hwfn, true,
2083 "PF failed to configure VF vport for vlan\n");
2087 /* Update all the Rx queues */
2088 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
2089 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
2090 struct ecore_queue_cid *p_cid = OSAL_NULL;
2092 /* There can be at most 1 Rx queue on qzone. Find it */
2093 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2094 if (p_cid == OSAL_NULL)
2095 continue;
2097 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2098 (void **)&p_cid,
2099 1, 0, 1,
2100 ECORE_SPQ_MODE_EBLOCK,
2101 OSAL_NULL);
2102 if (rc != ECORE_SUCCESS) {
2103 DP_NOTICE(p_hwfn, true,
2104 "Failed to send Rx update for queue[0x%04x]\n",
2105 p_cid->rel.queue_id);
2106 return rc;
2107 }
2108 }
2111 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
2112 } else {
2113 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
2114 }
2116 /* If forced features are terminated, we need to configure the shadow
2117 * configuration back again.
2118 */
2119 if (events)
2120 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2121 
2122 return rc;
2123 }
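/* Illustrative sketch, not driver code: how a forced-feature 'events'
 * bitmask like the one consumed above is typically interpreted. The bit
 * positions, helper, and values below are assumptions for the sketch only;
 * only the "zero pvid terminates forcing, replay the shadow config" rule
 * mirrors the function above.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

enum { SK_MAC_ADDR_FORCED = 0, SK_VLAN_ADDR_FORCED = 1 };

static void sketch_apply_forced(uint64_t events, uint16_t pvid)
{
	if (events & (1ULL << SK_MAC_ADDR_FORCED))
		printf("REPLACE unicast MAC from bulletin\n");

	if (events & (1ULL << SK_VLAN_ADDR_FORCED)) {
		if (pvid)
			printf("REPLACE vlan filter with pvid 0x%04x\n", pvid);
		else
			printf("forced vlan removed; replay shadow vlans\n");
	}
}

int main(void)
{
	sketch_apply_forced(1ULL << SK_VLAN_ADDR_FORCED, 0x64); /* force pvid 100 */
	sketch_apply_forced(1ULL << SK_VLAN_ADDR_FORCED, 0);    /* terminate pvid */
	return 0;
}
#endif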
2125 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2126 struct ecore_ptt *p_ptt,
2127 struct ecore_vf_info *vf)
2129 struct ecore_sp_vport_start_params params = {0};
2130 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2131 struct vfpf_vport_start_tlv *start;
2132 u8 status = PFVF_STATUS_SUCCESS;
2133 struct ecore_vf_info *vf_info;
2134 u64 *p_bitmap;
2135 int sb_id;
2136 enum _ecore_status_t rc;
2138 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2139 if (!vf_info) {
2140 DP_NOTICE(p_hwfn->p_dev, true,
2141 "Failed to get VF info, invalid vfid [%d]\n",
2142 vf->relative_vf_id);
2143 return;
2144 }
2146 vf->state = VF_ENABLED;
2147 start = &mbx->req_virt->start_vport;
2149 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2151 /* Initialize Status block in CAU */
2152 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2153 if (!start->sb_addr[sb_id]) {
2154 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2155 "VF[%d] did not fill the address of SB %d\n",
2156 vf->relative_vf_id, sb_id);
2157 continue;
2158 }
2160 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2161 start->sb_addr[sb_id],
2162 vf->igu_sbs[sb_id],
2163 vf->abs_vf_id, 1);
2164 }
2166 vf->mtu = start->mtu;
2167 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2169 /* Take into consideration configuration forced by hypervisor;
2170 * If none is configured, use the supplied VF values [for old
2171 * VFs that would still be fine, since they passed '0' as padding].
2172 */
2173 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2174 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2175 u8 vf_req = start->only_untagged;
2177 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2178 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2179 }
2181 params.tpa_mode = start->tpa_mode;
2182 params.remove_inner_vlan = start->inner_vlan_removal;
2183 params.tx_switching = true;
2184 params.zero_placement_offset = start->zero_placement_offset;
2187 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2188 DP_NOTICE(p_hwfn, false, "FPGA: Don't configure VF for Tx-switching [no pVFC]\n");
2189 params.tx_switching = false;
2190 }
2193 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2194 params.drop_ttl0 = false;
2195 params.concrete_fid = vf->concrete_fid;
2196 params.opaque_fid = vf->opaque_fid;
2197 params.vport_id = vf->vport_id;
2198 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2199 params.mtu = vf->mtu;
2200 params.check_mac = true;
2202 #ifndef ECORE_UPSTREAM
2203 rc = OSAL_IOV_PRE_START_VPORT(p_hwfn, vf->relative_vf_id, &params);
2204 if (rc != ECORE_SUCCESS) {
2205 DP_ERR(p_hwfn, "OSAL_IOV_PRE_START_VPORT returned error %d\n", rc);
2206 status = PFVF_STATUS_FAILURE;
2207 goto exit;
2208 }
2209 #endif
2211 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2212 if (rc != ECORE_SUCCESS) {
2213 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2214 status = PFVF_STATUS_FAILURE;
2215 } else {
2216 vf->vport_instance++;
2218 /* Force configuration if needed on the newly opened vport */
2219 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2220 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2221 vf->vport_id, vf->opaque_fid);
2222 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2224 #ifndef ECORE_UPSTREAM
2225 exit:
2226 #endif
2227 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2228 sizeof(struct pfvf_def_resp_tlv), status);
2231 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2232 struct ecore_ptt *p_ptt,
2233 struct ecore_vf_info *vf)
2235 u8 status = PFVF_STATUS_SUCCESS;
2236 enum _ecore_status_t rc;
2238 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2239 vf->vport_instance--;
2240 vf->spoof_chk = false;
2242 if ((ecore_iov_validate_active_rxq(vf)) ||
2243 (ecore_iov_validate_active_txq(vf))) {
2244 vf->b_malicious = true;
2245 DP_NOTICE(p_hwfn, false,
2246 "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
2247 vf->abs_vf_id);
2248 status = PFVF_STATUS_MALICIOUS;
2249 goto out;
2250 }
2252 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2253 if (rc != ECORE_SUCCESS) {
2254 DP_ERR(p_hwfn, "ecore_iov_vf_mbx_stop_vport returned error %d\n",
2255 rc);
2256 status = PFVF_STATUS_FAILURE;
2257 }
2259 /* Forget the configuration on the vport */
2260 vf->configured_features = 0;
2261 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2263 out:
2264 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2265 sizeof(struct pfvf_def_resp_tlv), status);
2268 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2269 struct ecore_ptt *p_ptt,
2270 struct ecore_vf_info *vf,
2271 u8 status, bool b_legacy)
2273 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2274 struct pfvf_start_queue_resp_tlv *p_tlv;
2275 struct vfpf_start_rxq_tlv *req;
2276 u16 length;
2278 mbx->offset = (u8 *)mbx->reply_virt;
2280 /* Taking a bigger struct instead of adding a TLV to list was a
2281 * mistake, but one which we're now stuck with, as some older
2282 * clients assume the size of the previous response.
2283 */
2284 if (!b_legacy)
2285 length = sizeof(*p_tlv);
2286 else
2287 length = sizeof(struct pfvf_def_resp_tlv);
2289 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2290 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2291 sizeof(struct channel_list_end_tlv));
2293 /* Update the TLV with the response */
2294 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2295 req = &mbx->req_virt->start_rxq;
2296 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2297 OFFSETOF(struct mstorm_vf_zone,
2298 non_trigger.eth_rx_queue_producers) +
2299 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2302 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2303 }
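/* Illustrative sketch, not driver code: the producer offset returned in
 * the START_RXQ response above is 'zone base + producers array offset +
 * rx_qid * entry size'. The struct layout and numeric base below are made
 * up for the example; only the arithmetic mirrors the computation above.
 */
#if 0
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct sk_rx_prod { uint16_t bd_prod; uint16_t cqe_prod; };
struct sk_vf_zone {
	uint32_t pad;
	struct sk_rx_prod eth_rx_queue_producers[16];
};

int main(void)
{
	const uint32_t zone_b_base = 0x200;	/* assumed BAR0 zone start */
	uint16_t rx_qid = 3;
	uint32_t offset = zone_b_base +
			  offsetof(struct sk_vf_zone,
				   eth_rx_queue_producers) +
			  (uint32_t)sizeof(struct sk_rx_prod) * rx_qid;

	printf("VF writes Rx producers at BAR0 offset 0x%x\n",
	       (unsigned)offset);
	return 0;
}
#endif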
2305 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2306 struct ecore_vf_info *p_vf, bool b_is_tx)
2308 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2309 struct vfpf_qid_tlv *p_qid_tlv;
2311 /* Search for the qid if the VF published that it's going to provide it */
2312 if (!(p_vf->acquire.vfdev_info.capabilities &
2313 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2314 if (b_is_tx)
2315 return ECORE_IOV_LEGACY_QID_TX;
2316 
2317 return ECORE_IOV_LEGACY_QID_RX;
2318 }
2320 p_qid_tlv = (struct vfpf_qid_tlv *)
2321 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2322 CHANNEL_TLV_QID);
2323 if (p_qid_tlv == OSAL_NULL) {
2324 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2325 "VF[%2x]: Failed to provide qid\n",
2326 p_vf->relative_vf_id);
2328 return ECORE_IOV_QID_INVALID;
2329 }
2331 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2332 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2333 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2334 p_vf->relative_vf_id, p_qid_tlv->qid);
2335 return ECORE_IOV_QID_INVALID;
2336 }
2338 return p_qid_tlv->qid;
2339 }
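/* Illustrative sketch, not driver code: the qid-usage index chosen above
 * is either a fixed legacy slot (one for Rx, one for Tx) or a VF-supplied
 * index that must stay below the number of cids a queue-zone can host.
 * All constants and the helper are assumptions for the sketch.
 */
#if 0
#include <stdio.h>

#define SK_LEGACY_QID_RX	0
#define SK_LEGACY_QID_TX	1
#define SK_QID_INVALID		0xff
#define SK_MAX_QUEUES_PER_QZONE	4

static unsigned char sketch_pick_qid(int vf_supports_qids, int is_tx,
				     unsigned char supplied_qid)
{
	if (!vf_supports_qids)
		return is_tx ? SK_LEGACY_QID_TX : SK_LEGACY_QID_RX;

	/* QIDS-capable VFs must publish a valid per-qzone index */
	if (supplied_qid >= SK_MAX_QUEUES_PER_QZONE)
		return SK_QID_INVALID;

	return supplied_qid;
}

int main(void)
{
	printf("legacy rx -> %u\n", sketch_pick_qid(0, 0, 0));
	printf("qids tx=2 -> %u\n", sketch_pick_qid(1, 1, 2));
	printf("qids bad  -> %u\n", sketch_pick_qid(1, 1, 9));
	return 0;
}
#endif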
2341 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2342 struct ecore_ptt *p_ptt,
2343 struct ecore_vf_info *vf)
2345 struct ecore_queue_start_common_params params;
2346 struct ecore_queue_cid_vf_params vf_params;
2347 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2348 u8 status = PFVF_STATUS_NO_RESOURCE;
2349 u8 qid_usage_idx, vf_legacy = 0;
2350 struct ecore_vf_queue *p_queue;
2351 struct vfpf_start_rxq_tlv *req;
2352 struct ecore_queue_cid *p_cid;
2353 struct ecore_sb_info sb_dummy;
2354 enum _ecore_status_t rc;
2356 req = &mbx->req_virt->start_rxq;
2358 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2359 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2360 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2361 goto out;
2363 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2364 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2365 goto out;
2367 p_queue = &vf->vf_queues[req->rx_qid];
2368 if (p_queue->cids[qid_usage_idx].p_cid)
2369 goto out;
2371 vf_legacy = ecore_vf_calculate_legacy(vf);
2373 /* Acquire a new queue-cid */
2374 OSAL_MEMSET(&params, 0, sizeof(params));
2375 params.queue_id = (u8)p_queue->fw_rx_qid;
2376 params.vport_id = vf->vport_id;
2377 params.stats_id = vf->abs_vf_id + 0x10;
2379 /* Since IGU index is passed via sb_info, construct a dummy one */
2380 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2381 sb_dummy.igu_sb_id = req->hw_sb;
2382 params.p_sb = &sb_dummy;
2383 params.sb_idx = req->sb_index;
2385 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2386 vf_params.vfid = vf->relative_vf_id;
2387 vf_params.vf_qid = (u8)req->rx_qid;
2388 vf_params.vf_legacy = vf_legacy;
2389 vf_params.qid_usage_idx = qid_usage_idx;
2391 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2392 &params, true, &vf_params);
2393 if (p_cid == OSAL_NULL)
2394 goto out;
2396 /* Legacy VFs have their Producers in a different location, which they
2397 * calculate on their own and clean the producer prior to this.
2398 */
2399 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2400 REG_WR(p_hwfn,
2401 GTT_BAR0_MAP_REG_MSDM_RAM +
2402 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2403 0);
2405 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2406 req->bd_max_bytes,
2407 req->rxq_addr,
2408 req->cqe_pbl_addr,
2409 req->cqe_pbl_size);
2410 if (rc != ECORE_SUCCESS) {
2411 status = PFVF_STATUS_FAILURE;
2412 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2413 } else {
2414 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2415 p_queue->cids[qid_usage_idx].b_is_tx = false;
2416 status = PFVF_STATUS_SUCCESS;
2417 vf->num_active_rxqs++;
2418 }
2419 
2420 out:
2421 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2422 !!(vf_legacy &
2423 ECORE_QCID_LEGACY_VF_RX_PROD));
2424 }
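/* Illustrative sketch, not driver code: whether the PF or the VF cleans
 * the Rx producer depends on the vf_legacy bits derived from the acquire
 * request (see ecore_vf_calculate_legacy()). Bit values are assumptions
 * for the sketch.
 */
#if 0
#include <stdio.h>

#define SK_QCID_LEGACY_VF_RX_PROD	(1 << 0)
#define SK_QCID_LEGACY_VF_CID		(1 << 1)

int main(void)
{
	unsigned char vf_legacy = SK_QCID_LEGACY_VF_CID;

	if (!(vf_legacy & SK_QCID_LEGACY_VF_RX_PROD))
		printf("PF zeroes the producer in MSDM RAM for the VF\n");
	else
		printf("legacy VF computes and cleans its own producer\n");
	return 0;
}
#endif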
2427 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2428 struct ecore_tunnel_info *p_tun,
2429 u16 tunn_feature_mask)
2431 p_resp->tunn_feature_mask = tunn_feature_mask;
2432 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2433 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2434 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2435 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2436 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2437 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2438 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2439 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2440 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2441 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2442 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2443 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2447 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2448 struct ecore_tunn_update_type *p_tun,
2449 enum ecore_tunn_mode mask, u8 tun_cls)
2451 if (p_req->tun_mode_update_mask & (1 << mask)) {
2452 p_tun->b_update_mode = true;
2454 if (p_req->tunn_mode & (1 << mask))
2455 p_tun->b_mode_enabled = true;
2456 }
2458 p_tun->tun_cls = tun_cls;
2459 }
2462 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2463 struct ecore_tunn_update_type *p_tun,
2464 struct ecore_tunn_update_udp_port *p_port,
2465 enum ecore_tunn_mode mask,
2466 u8 tun_cls, u8 update_port, u16 port)
2468 if (update_port) {
2469 p_port->b_update_port = true;
2470 p_port->port = port;
2471 }
2473 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2477 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2479 bool b_update_requested = false;
2481 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2482 p_req->update_geneve_port || p_req->update_vxlan_port)
2483 b_update_requested = true;
2485 return b_update_requested;
2488 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2489 struct ecore_ptt *p_ptt,
2490 struct ecore_vf_info *p_vf)
2492 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2493 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2494 struct pfvf_update_tunn_param_tlv *p_resp;
2495 struct vfpf_update_tunn_param_tlv *p_req;
2496 enum _ecore_status_t rc = ECORE_SUCCESS;
2497 u8 status = PFVF_STATUS_SUCCESS;
2498 bool b_update_required = false;
2499 struct ecore_tunnel_info tunn;
2500 u16 tunn_feature_mask = 0;
2501 int i;
2503 mbx->offset = (u8 *)mbx->reply_virt;
2505 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2506 p_req = &mbx->req_virt->tunn_param_update;
2508 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2509 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2510 "No tunnel update requested by VF\n");
2511 status = PFVF_STATUS_FAILURE;
2512 goto send_resp;
2513 }
2515 tunn.b_update_rx_cls = p_req->update_tun_cls;
2516 tunn.b_update_tx_cls = p_req->update_tun_cls;
2518 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2519 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2520 p_req->update_vxlan_port,
2521 p_req->vxlan_port);
2522 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2523 ECORE_MODE_L2GENEVE_TUNN,
2524 p_req->l2geneve_clss,
2525 p_req->update_geneve_port,
2526 p_req->geneve_port);
2527 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2528 ECORE_MODE_IPGENEVE_TUNN,
2529 p_req->ipgeneve_clss);
2530 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2531 ECORE_MODE_L2GRE_TUNN,
2532 p_req->l2gre_clss);
2533 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2534 ECORE_MODE_IPGRE_TUNN,
2535 p_req->ipgre_clss);
2537 /* If the PF modifies the VF's request then it should
2538 * still return an error in case of partial configuration
2539 * or modified configuration, as opposed to the requested one.
2540 */
2541 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2542 &b_update_required, &tunn);
2544 if (rc != ECORE_SUCCESS)
2545 status = PFVF_STATUS_FAILURE;
2547 /* Check whether the ECORE client is willing to update anything */
2548 if (b_update_required) {
2549 u16 geneve_port;
2551 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2552 ECORE_SPQ_MODE_EBLOCK,
2553 OSAL_NULL);
2554 if (rc != ECORE_SUCCESS)
2555 status = PFVF_STATUS_FAILURE;
2557 geneve_port = p_tun->geneve_port.port;
2558 ecore_for_each_vf(p_hwfn, i) {
2559 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2560 p_tun->vxlan_port.port,
2561 geneve_port);
2562 }
2563 }
2565 send_resp:
2566 p_resp = ecore_add_tlv(&mbx->offset,
2567 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2569 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2570 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2571 sizeof(struct channel_list_end_tlv));
2573 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2576 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2577 struct ecore_ptt *p_ptt,
2578 struct ecore_vf_info *p_vf,
2579 u32 cid, u8 status)
2582 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2583 struct pfvf_start_queue_resp_tlv *p_tlv;
2584 bool b_legacy = false;
2585 u16 length;
2587 mbx->offset = (u8 *)mbx->reply_virt;
2589 /* Taking a bigger struct instead of adding a TLV to list was a
2590 * mistake, but one which we're now stuck with, as some older
2591 * clients assume the size of the previous response.
2592 */
2593 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2594 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2595 b_legacy = true;
2596 
2597 if (!b_legacy)
2598 length = sizeof(*p_tlv);
2599 else
2600 length = sizeof(struct pfvf_def_resp_tlv);
2602 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2603 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2604 sizeof(struct channel_list_end_tlv));
2606 /* Update the TLV with the response */
2607 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2608 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2610 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2613 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2614 struct ecore_ptt *p_ptt,
2615 struct ecore_vf_info *vf)
2617 struct ecore_queue_start_common_params params;
2618 struct ecore_queue_cid_vf_params vf_params;
2619 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2620 u8 status = PFVF_STATUS_NO_RESOURCE;
2621 struct ecore_vf_queue *p_queue;
2622 struct vfpf_start_txq_tlv *req;
2623 struct ecore_queue_cid *p_cid;
2624 struct ecore_sb_info sb_dummy;
2625 u8 qid_usage_idx, vf_legacy;
2626 u32 cid = 0;
2627 enum _ecore_status_t rc;
2628 u16 pq;
2630 OSAL_MEMSET(&params, 0, sizeof(params));
2631 req = &mbx->req_virt->start_txq;
2633 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2634 ECORE_IOV_VALIDATE_Q_NA) ||
2635 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2636 goto out;
2638 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2639 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2640 goto out;
2642 p_queue = &vf->vf_queues[req->tx_qid];
2643 if (p_queue->cids[qid_usage_idx].p_cid)
2644 goto out;
2646 vf_legacy = ecore_vf_calculate_legacy(vf);
2648 /* Acquire a new queue-cid */
2649 params.queue_id = p_queue->fw_tx_qid;
2650 params.vport_id = vf->vport_id;
2651 params.stats_id = vf->abs_vf_id + 0x10;
2653 /* Since IGU index is passed via sb_info, construct a dummy one */
2654 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2655 sb_dummy.igu_sb_id = req->hw_sb;
2656 params.p_sb = &sb_dummy;
2657 params.sb_idx = req->sb_index;
2659 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2660 vf_params.vfid = vf->relative_vf_id;
2661 vf_params.vf_qid = (u8)req->tx_qid;
2662 vf_params.vf_legacy = vf_legacy;
2663 vf_params.qid_usage_idx = qid_usage_idx;
2665 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2666 &params, false, &vf_params);
2667 if (p_cid == OSAL_NULL)
2668 goto out;
2670 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2671 vf->relative_vf_id);
2672 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2673 req->pbl_addr, req->pbl_size, pq);
2674 if (rc != ECORE_SUCCESS) {
2675 status = PFVF_STATUS_FAILURE;
2676 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2677 } else {
2678 status = PFVF_STATUS_SUCCESS;
2679 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2680 p_queue->cids[qid_usage_idx].b_is_tx = true;
2681 cid = p_cid->cid;
2682 }
2684 out:
2685 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2686 cid, status);
2687 }
2689 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2690 struct ecore_vf_info *vf,
2691 u16 rxq_id,
2692 u8 qid_usage_idx,
2693 bool cqe_completion)
2695 struct ecore_vf_queue *p_queue;
2696 enum _ecore_status_t rc = ECORE_SUCCESS;
2698 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2699 ECORE_IOV_VALIDATE_Q_NA)) {
2700 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2701 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2702 vf->relative_vf_id, rxq_id, qid_usage_idx);
2703 return ECORE_INVAL;
2704 }
2706 p_queue = &vf->vf_queues[rxq_id];
2708 /* We've validated the index and the existence of the active RXQ -
2709 * now we need to make sure that it's using the correct qid.
2710 */
2711 if (!p_queue->cids[qid_usage_idx].p_cid ||
2712 p_queue->cids[qid_usage_idx].b_is_tx) {
2713 struct ecore_queue_cid *p_cid;
2715 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2716 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2717 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2718 vf->relative_vf_id, rxq_id, qid_usage_idx,
2719 rxq_id, p_cid->qid_usage_idx);
2720 return ECORE_INVAL;
2721 }
2723 /* Now that we know we have a valid Rx-queue - close it */
2724 rc = ecore_eth_rx_queue_stop(p_hwfn,
2725 p_queue->cids[qid_usage_idx].p_cid,
2726 false, cqe_completion);
2727 if (rc != ECORE_SUCCESS)
2728 return rc;
2730 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2731 vf->num_active_rxqs--;
2733 return ECORE_SUCCESS;
2736 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2737 struct ecore_vf_info *vf,
2738 u16 txq_id,
2739 u8 qid_usage_idx)
2741 struct ecore_vf_queue *p_queue;
2742 enum _ecore_status_t rc = ECORE_SUCCESS;
2744 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2745 ECORE_IOV_VALIDATE_Q_NA))
2746 return ECORE_INVAL;
2748 p_queue = &vf->vf_queues[txq_id];
2749 if (!p_queue->cids[qid_usage_idx].p_cid ||
2750 !p_queue->cids[qid_usage_idx].b_is_tx)
2751 return ECORE_INVAL;
2753 rc = ecore_eth_tx_queue_stop(p_hwfn,
2754 p_queue->cids[qid_usage_idx].p_cid);
2755 if (rc != ECORE_SUCCESS)
2756 return rc;
2758 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2759 return ECORE_SUCCESS;
2762 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2763 struct ecore_ptt *p_ptt,
2764 struct ecore_vf_info *vf)
2766 u16 length = sizeof(struct pfvf_def_resp_tlv);
2767 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2768 u8 status = PFVF_STATUS_FAILURE;
2769 struct vfpf_stop_rxqs_tlv *req;
2770 u8 qid_usage_idx;
2771 enum _ecore_status_t rc;
2773 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
2774 * would be one. Since no older ecore passed multiple queues
2775 * using this API, sanitize on the value.
2776 */
2777 req = &mbx->req_virt->stop_rxqs;
2778 if (req->num_rxqs != 1) {
2779 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2780 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2781 vf->relative_vf_id);
2782 status = PFVF_STATUS_NOT_SUPPORTED;
2783 goto out;
2784 }
2786 /* Find which qid-index is associated with the queue */
2787 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2788 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2789 goto out;
2791 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2792 qid_usage_idx, req->cqe_completion);
2793 if (rc == ECORE_SUCCESS)
2794 status = PFVF_STATUS_SUCCESS;
2795 out:
2796 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2797 length, status);
2798 }
2800 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2801 struct ecore_ptt *p_ptt,
2802 struct ecore_vf_info *vf)
2804 u16 length = sizeof(struct pfvf_def_resp_tlv);
2805 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2806 u8 status = PFVF_STATUS_FAILURE;
2807 struct vfpf_stop_txqs_tlv *req;
2808 u8 qid_usage_idx;
2809 enum _ecore_status_t rc;
2811 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
2812 * would be one. Since no older ecore passed multiple queues
2813 * using this API, sanitize on the value.
2814 */
2815 req = &mbx->req_virt->stop_txqs;
2816 if (req->num_txqs != 1) {
2817 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2818 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2819 vf->relative_vf_id);
2820 status = PFVF_STATUS_NOT_SUPPORTED;
2821 goto out;
2822 }
2824 /* Find which qid-index is associated with the queue */
2825 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2826 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2827 goto out;
2829 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2830 qid_usage_idx);
2831 if (rc == ECORE_SUCCESS)
2832 status = PFVF_STATUS_SUCCESS;
2834 out:
2835 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2836 length, status);
2837 }
2839 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2840 struct ecore_ptt *p_ptt,
2841 struct ecore_vf_info *vf)
2843 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2844 u16 length = sizeof(struct pfvf_def_resp_tlv);
2845 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2846 struct vfpf_update_rxq_tlv *req;
2847 u8 status = PFVF_STATUS_FAILURE;
2848 u8 complete_event_flg;
2849 u8 complete_cqe_flg;
2850 u8 qid_usage_idx;
2851 enum _ecore_status_t rc;
2852 u16 i;
2854 req = &mbx->req_virt->update_rxq;
2855 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2856 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2858 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2859 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2860 goto out;
2862 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2863 * expecting a single queue at a time. Validate this.
2864 */
2865 if ((vf->acquire.vfdev_info.capabilities &
2866 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2867 req->num_rxqs != 1) {
2868 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2869 "VF[%d] supports QIDs but sends multiple queues\n",
2870 vf->relative_vf_id);
2871 goto out;
2872 }
2874 /* Validate inputs - for the legacy case this is still true since
2875 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2876 */
2877 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2878 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2879 ECORE_IOV_VALIDATE_Q_NA) ||
2880 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2881 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2882 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2883 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2884 vf->relative_vf_id, req->rx_qid,
2885 req->num_rxqs);
2886 goto out;
2887 }
2888 }
2890 for (i = 0; i < req->num_rxqs; i++) {
2891 u16 qid = req->rx_qid + i;
2893 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2894 }
2896 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2897 req->num_rxqs,
2898 complete_cqe_flg,
2899 complete_event_flg,
2900 ECORE_SPQ_MODE_EBLOCK,
2901 OSAL_NULL);
2902 if (rc != ECORE_SUCCESS)
2903 goto out;
2905 status = PFVF_STATUS_SUCCESS;
2906 out:
2907 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2908 length, status);
2909 }
2911 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2912 void *p_tlvs_list, u16 req_type)
2914 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2915 int len = 0;
2916 
2917 do {
2918 if (!p_tlv->length) {
2919 DP_NOTICE(p_hwfn, true,
2920 "Zero length TLV found\n");
2924 if (p_tlv->type == req_type) {
2925 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2926 "Extended tlv type %s, length %d found\n",
2927 ecore_channel_tlvs_string[p_tlv->type],
2928 p_tlv->length);
2929 return p_tlv;
2930 }
2932 len += p_tlv->length;
2933 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2935 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2936 DP_NOTICE(p_hwfn, true,
2937 "TLVs have overrun the buffer size\n");
2938 return OSAL_NULL;
2939 }
2940 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2941 
2942 return OSAL_NULL;
2943 }
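/* Illustrative sketch, not driver code: the mailbox request walked by
 * ecore_iov_search_list_tlvs() is a flat buffer of {type, length} headers
 * laid end to end and terminated by a LIST_END entry. The walker below
 * mirrors that loop over a made-up two-entry chain; all names and sizes
 * here are assumptions for the sketch.
 */
#if 0
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define SK_TLV_LIST_END	0
#define SK_BUFFER_SIZE	16

struct sk_tlv { uint16_t type; uint16_t length; };

static void *sk_search(uint8_t *buf, uint16_t want)
{
	struct sk_tlv *tlv = (struct sk_tlv *)buf;
	int len = 0;

	do {
		if (!tlv->length)
			return NULL;	/* malformed: zero-length entry */
		if (tlv->type == want)
			return tlv;	/* extended TLV located */
		len += tlv->length;
		tlv = (struct sk_tlv *)((uint8_t *)tlv + tlv->length);
		if (len + tlv->length > SK_BUFFER_SIZE)
			return NULL;	/* chain would overrun the buffer */
	} while (tlv->type != SK_TLV_LIST_END);

	return NULL;
}

int main(void)
{
	struct sk_tlv buf[2] = {
		{ 7, sizeof(struct sk_tlv) },
		{ SK_TLV_LIST_END, sizeof(struct sk_tlv) },
	};

	printf("type 7 %sfound\n", sk_search((uint8_t *)buf, 7) ? "" : "not ");
	return 0;
}
#endif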
2946 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2947 struct ecore_sp_vport_update_params *p_data,
2948 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2950 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2951 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2953 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2954 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2955 tlv);
2956 if (!p_act_tlv)
2957 return;
2959 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2960 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2961 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2962 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2963 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2967 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2968 struct ecore_sp_vport_update_params *p_data,
2969 struct ecore_vf_info *p_vf,
2970 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2972 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2973 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2975 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2976 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2977 tlv);
2978 if (!p_vlan_tlv)
2979 return;
2981 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2983 /* Ignore the VF request if we're forcing a vlan */
2984 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2985 p_data->update_inner_vlan_removal_flg = 1;
2986 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2987 }
2989 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2993 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2994 struct ecore_sp_vport_update_params *p_data,
2995 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2997 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2998 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
3000 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
3001 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3002 tlv);
3003 if (!p_tx_switch_tlv)
3004 return;
3007 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
3008 DP_NOTICE(p_hwfn, false, "FPGA: Ignore tx-switching configuration originating from VFs\n");
3009 return;
3010 }
3013 p_data->update_tx_switching_flg = 1;
3014 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
3015 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
3019 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
3020 struct ecore_sp_vport_update_params *p_data,
3021 struct ecore_iov_vf_mbx *p_mbx,
3024 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
3025 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
3027 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
3028 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3029 tlv);
3030 if (!p_mcast_tlv)
3031 return;
3033 p_data->update_approx_mcast_flg = 1;
3034 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
3035 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
3036 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
3040 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
3041 struct ecore_sp_vport_update_params *p_data,
3042 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3044 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
3045 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
3046 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
3048 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
3049 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3050 tlv);
3051 if (!p_accept_tlv)
3052 return;
3054 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
3055 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
3056 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
3057 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
3058 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
3062 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
3063 struct ecore_sp_vport_update_params *p_data,
3064 struct ecore_iov_vf_mbx *p_mbx,
3067 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3068 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3070 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3071 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3072 tlv);
3073 if (!p_accept_any_vlan)
3074 return;
3076 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3077 p_data->update_accept_any_vlan_flg =
3078 p_accept_any_vlan->update_accept_any_vlan_flg;
3079 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3083 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3084 struct ecore_vf_info *vf,
3085 struct ecore_sp_vport_update_params *p_data,
3086 struct ecore_rss_params *p_rss,
3087 struct ecore_iov_vf_mbx *p_mbx,
3088 u16 *tlvs_mask, u16 *tlvs_accepted)
3090 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3091 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3092 bool b_reject = false;
3096 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3097 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
3098 tlv);
3099 if (!p_rss_tlv) {
3100 p_data->rss_params = OSAL_NULL;
3101 return;
3102 }
3104 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3106 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
3107 VFPF_UPDATE_RSS_CONFIG_FLAG);
3108 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
3109 VFPF_UPDATE_RSS_CAPS_FLAG);
3110 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
3111 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3112 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
3113 VFPF_UPDATE_RSS_KEY_FLAG);
3115 p_rss->rss_enable = p_rss_tlv->rss_enable;
3116 p_rss->rss_eng_id = vf->rss_eng_id;
3117 p_rss->rss_caps = p_rss_tlv->rss_caps;
3118 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3119 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3120 sizeof(p_rss->rss_key));
3122 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3123 (1 << p_rss_tlv->rss_table_size_log));
3125 for (i = 0; i < table_size; i++) {
3126 struct ecore_queue_cid *p_cid;
3128 q_idx = p_rss_tlv->rss_ind_table[i];
3129 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3130 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3131 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3132 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3133 vf->relative_vf_id, q_idx);
3134 b_reject = true;
3135 goto out;
3136 }
3138 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3139 p_rss->rss_ind_table[i] = p_cid;
3140 }
3142 p_data->rss_params = p_rss;
3143 out:
3144 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3145 if (!b_reject)
3146 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3147 }
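/* Illustrative sketch, not driver code: the indirection-table size used
 * above is min(entries the PF can hold, 1 << rss_table_size_log). The
 * 128-entry capacity below is an assumption for the worked example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned pf_entries = 128;	/* assumed array capacity on the PF */
	unsigned log = 7;		/* rss_table_size_log from the TLV */
	unsigned table_size =
		(pf_entries < (1u << log)) ? pf_entries : (1u << log);

	printf("validating %u indirection entries\n", table_size);
	return 0;
}
#endif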
3150 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3151 struct ecore_sp_vport_update_params *p_data,
3152 struct ecore_sge_tpa_params *p_sge_tpa,
3153 struct ecore_iov_vf_mbx *p_mbx,
3156 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3157 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3159 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3160 ecore_iov_search_list_tlvs(p_hwfn,
3161 p_mbx->req_virt, tlv);
3163 if (!p_sge_tpa_tlv) {
3164 p_data->sge_tpa_params = OSAL_NULL;
3165 return;
3166 }
3168 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3170 p_sge_tpa->update_tpa_en_flg =
3171 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3172 VFPF_UPDATE_TPA_EN_FLAG);
3173 p_sge_tpa->update_tpa_param_flg =
3174 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3175 VFPF_UPDATE_TPA_PARAM_FLAG);
3177 p_sge_tpa->tpa_ipv4_en_flg =
3178 !!(p_sge_tpa_tlv->sge_tpa_flags &
3179 VFPF_TPA_IPV4_EN_FLAG);
3180 p_sge_tpa->tpa_ipv6_en_flg =
3181 !!(p_sge_tpa_tlv->sge_tpa_flags &
3182 VFPF_TPA_IPV6_EN_FLAG);
3183 p_sge_tpa->tpa_pkt_split_flg =
3184 !!(p_sge_tpa_tlv->sge_tpa_flags &
3185 VFPF_TPA_PKT_SPLIT_FLAG);
3186 p_sge_tpa->tpa_hdr_data_split_flg =
3187 !!(p_sge_tpa_tlv->sge_tpa_flags &
3188 VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3189 p_sge_tpa->tpa_gro_consistent_flg =
3190 !!(p_sge_tpa_tlv->sge_tpa_flags &
3191 VFPF_TPA_GRO_CONSIST_FLAG);
3193 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3194 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3195 p_sge_tpa->tpa_min_size_to_start =
3196 p_sge_tpa_tlv->tpa_min_size_to_start;
3197 p_sge_tpa->tpa_min_size_to_cont =
3198 p_sge_tpa_tlv->tpa_min_size_to_cont;
3199 p_sge_tpa->max_buffers_per_cqe =
3200 p_sge_tpa_tlv->max_buffers_per_cqe;
3202 p_data->sge_tpa_params = p_sge_tpa;
3204 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3207 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3208 struct ecore_ptt *p_ptt,
3209 struct ecore_vf_info *vf)
3211 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3212 struct ecore_sp_vport_update_params params;
3213 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3214 struct ecore_sge_tpa_params sge_tpa_params;
3215 u16 tlvs_mask = 0, tlvs_accepted = 0;
3216 u8 status = PFVF_STATUS_SUCCESS;
3217 u16 length;
3218 enum _ecore_status_t rc;
3220 /* Validate that the PF can send such a request */
3221 if (!vf->vport_instance) {
3222 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3223 "No VPORT instance available for VF[%d], failing vport update\n",
3225 status = PFVF_STATUS_FAILURE;
3229 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3230 if (p_rss_params == OSAL_NULL) {
3231 status = PFVF_STATUS_FAILURE;
3232 goto out;
3233 }
3235 OSAL_MEMSET(&params, 0, sizeof(params));
3236 params.opaque_fid = vf->opaque_fid;
3237 params.vport_id = vf->vport_id;
3238 params.rss_params = OSAL_NULL;
3240 /* Search for extended tlvs list and update values
3241 * from VF in struct ecore_sp_vport_update_params.
3242 */
3243 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3244 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3245 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3246 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3247 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3248 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3249 ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
3250 &sge_tpa_params, mbx, &tlvs_mask);
3252 tlvs_accepted = tlvs_mask;
3254 /* Some of the extended TLVs need to be validated first; In that case,
3255 * they can update the mask without updating the accepted [so that
3256 * PF could communicate to VF it has rejected request].
3257 */
3258 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3259 mbx, &tlvs_mask, &tlvs_accepted);
3261 /* Just log a message if there is no single extended tlv in buffer.
3262 * When all features of vport update ramrod would be requested by VF
3263 * as extended TLVs in buffer then an error can be returned in response
3264 * if there is no extended TLV present in buffer.
3265 */
3266 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3267 &params, &tlvs_accepted) !=
3268 ECORE_SUCCESS) {
3270 status = PFVF_STATUS_NOT_SUPPORTED;
3271 goto out;
3272 }
3274 if (!tlvs_accepted) {
3275 if (tlvs_mask)
3276 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3277 "Upper-layer prevents said VF configuration\n");
3278 else
3279 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3280 "No feature tlvs found for vport update\n");
3281 status = PFVF_STATUS_NOT_SUPPORTED;
3282 goto out;
3283 }
3285 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3286 OSAL_NULL);
3287 
3288 if (rc != ECORE_SUCCESS)
3289 status = PFVF_STATUS_FAILURE;
3290 
3291 out:
3292 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3293 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3294 tlvs_mask, tlvs_accepted);
3295 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3298 static enum _ecore_status_t ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3299 struct ecore_vf_info *p_vf,
3300 struct ecore_filter_ucast *p_params)
3301 {
3302 u8 i;
3304 /* First remove entries and then add new ones */
3305 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3306 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3307 if (p_vf->shadow_config.vlans[i].used &&
3308 p_vf->shadow_config.vlans[i].vid ==
3309 p_params->vlan) {
3310 p_vf->shadow_config.vlans[i].used = false;
3311 break;
3312 }
3313 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3314 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3315 "VF [%d] - Tries to remove a non-existing vlan\n",
3316 p_vf->relative_vf_id);
3317 return ECORE_INVAL;
3318 }
3319 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3320 p_params->opcode == ECORE_FILTER_FLUSH) {
3321 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3322 p_vf->shadow_config.vlans[i].used = false;
3323 }
3325 /* In forced mode, we're willing to remove entries - but we don't add
3326 * new ones.
3327 */
3328 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3329 return ECORE_SUCCESS;
3331 if (p_params->opcode == ECORE_FILTER_ADD ||
3332 p_params->opcode == ECORE_FILTER_REPLACE) {
3333 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3334 if (p_vf->shadow_config.vlans[i].used)
3335 continue;
3336 
3337 p_vf->shadow_config.vlans[i].used = true;
3338 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3339 break;
3340 }
3342 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3343 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3344 "VF [%d] - Tries to configure more than %d vlan filters\n",
3345 p_vf->relative_vf_id,
3346 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3347 return ECORE_INVAL;
3348 }
3349 }
3351 return ECORE_SUCCESS;
3352 }
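/* Illustrative sketch, not driver code: the shadow VLAN table above holds
 * ECORE_ETH_VF_NUM_VLAN_FILTERS + 1 entries. An ADD takes the first unused
 * slot and running out is reported as a failure, as in the function above.
 * The table size below is an assumption for the sketch.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define SK_NUM_VLAN_FILTERS 2		/* assumed; +1 slot below */

struct sk_vlan_shadow { bool used; uint16_t vid; };

static int sk_add_vlan(struct sk_vlan_shadow *tbl, uint16_t vid)
{
	int i;

	for (i = 0; i < SK_NUM_VLAN_FILTERS + 1; i++) {
		if (tbl[i].used)
			continue;
		tbl[i].used = true;
		tbl[i].vid = vid;
		return 0;
	}
	return -1;			/* table full */
}

int main(void)
{
	struct sk_vlan_shadow tbl[SK_NUM_VLAN_FILTERS + 1] = { { false, 0 } };

	printf("add 100: %d\n", sk_add_vlan(tbl, 100));
	printf("add 200: %d\n", sk_add_vlan(tbl, 200));
	printf("add 300: %d\n", sk_add_vlan(tbl, 300));
	printf("add 400: %d\n", sk_add_vlan(tbl, 400)); /* -1: full */
	return 0;
}
#endif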
3354 static enum _ecore_status_t ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3355 struct ecore_vf_info *p_vf,
3356 struct ecore_filter_ucast *p_params)
3357 {
3358 char empty_mac[ETH_ALEN];
3359 u8 i;
3361 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3363 /* If we're in forced-mode, we don't allow any change */
3364 /* TODO - this would change if we were ever to implement logic for
3365 * removing a forced MAC altogether [in which case, like for vlans,
3366 * we should be able to re-trace previous configuration.
3367 */
3368 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3369 return ECORE_SUCCESS;
3371 /* First remove entries and then add new ones */
3372 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3373 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3374 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3375 p_params->mac, ETH_ALEN)) {
3376 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3377 ETH_ALEN);
3378 break;
3379 }
3380 }
3382 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3383 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3384 "MAC isn't configured\n");
3387 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3388 p_params->opcode == ECORE_FILTER_FLUSH) {
3389 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3390 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3391 }
3393 /* List the new MAC address */
3394 if (p_params->opcode != ECORE_FILTER_ADD &&
3395 p_params->opcode != ECORE_FILTER_REPLACE)
3396 return ECORE_SUCCESS;
3398 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3399 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3400 empty_mac, ETH_ALEN)) {
3401 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3402 p_params->mac, ETH_ALEN);
3403 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3404 "Added MAC at %d entry in shadow\n", i);
3409 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3410 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3411 "No available place for MAC\n");
3415 return ECORE_SUCCESS;
3418 static enum _ecore_status_t
3419 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3420 struct ecore_vf_info *p_vf,
3421 struct ecore_filter_ucast *p_params)
3423 enum _ecore_status_t rc = ECORE_SUCCESS;
3425 if (p_params->type == ECORE_FILTER_MAC) {
3426 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3427 if (rc != ECORE_SUCCESS)
3428 return rc;
3429 }
3431 if (p_params->type == ECORE_FILTER_VLAN)
3432 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3433 
3434 return rc;
3435 }
3437 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3438 struct ecore_ptt *p_ptt,
3439 struct ecore_vf_info *vf)
3441 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3442 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3443 struct vfpf_ucast_filter_tlv *req;
3444 u8 status = PFVF_STATUS_SUCCESS;
3445 struct ecore_filter_ucast params;
3446 enum _ecore_status_t rc;
3448 /* Prepare the unicast filter params */
3449 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3450 req = &mbx->req_virt->ucast_filter;
3451 params.opcode = (enum ecore_filter_opcode)req->opcode;
3452 params.type = (enum ecore_filter_ucast_type)req->type;
3454 /* @@@TBD - We might need logic on HV side in determining this */
3455 params.is_rx_filter = 1;
3456 params.is_tx_filter = 1;
3457 params.vport_to_remove_from = vf->vport_id;
3458 params.vport_to_add_to = vf->vport_id;
3459 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3460 params.vlan = req->vlan;
3462 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3463 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3464 vf->abs_vf_id, params.opcode, params.type,
3465 params.is_rx_filter ? "RX" : "",
3466 params.is_tx_filter ? "TX" : "",
3467 params.vport_to_add_to,
3468 params.mac[0], params.mac[1], params.mac[2],
3469 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3471 if (!vf->vport_instance) {
3472 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3473 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3475 status = PFVF_STATUS_FAILURE;
3479 /* Update shadow copy of the VF configuration. In case shadow indicates
3480 * the action should be blocked return success to VF to imitate the
3481 * firmware behaviour in such case.
3482 */
3483 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3484 ECORE_SUCCESS)
3485 goto out;
3487 /* Determine if the unicast filtering is acceptable by the PF */
3488 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3489 (params.type == ECORE_FILTER_VLAN ||
3490 params.type == ECORE_FILTER_MAC_VLAN)) {
3491 /* Once VLAN is forced or PVID is set, do not allow
3492 * to add/replace any further VLANs.
3493 */
3494 if (params.opcode == ECORE_FILTER_ADD ||
3495 params.opcode == ECORE_FILTER_REPLACE)
3496 status = PFVF_STATUS_FORCED;
3497 goto out;
3498 }
3500 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3501 (params.type == ECORE_FILTER_MAC ||
3502 params.type == ECORE_FILTER_MAC_VLAN)) {
3503 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3504 (params.opcode != ECORE_FILTER_ADD &&
3505 params.opcode != ECORE_FILTER_REPLACE))
3506 status = PFVF_STATUS_FORCED;
3507 goto out;
3508 }
3510 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3511 if (rc == ECORE_EXISTS) {
3512 goto out;
3513 } else if (rc == ECORE_INVAL) {
3514 status = PFVF_STATUS_FAILURE;
3515 goto out;
3516 }
3518 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3519 ECORE_SPQ_MODE_CB, OSAL_NULL);
3520 if (rc != ECORE_SUCCESS)
3521 status = PFVF_STATUS_FAILURE;
3522 
3523 out:
3524 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3525 sizeof(struct pfvf_def_resp_tlv), status);
3528 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3529 struct ecore_ptt *p_ptt,
3530 struct ecore_vf_info *vf)
3531 {
3532 int i;
3533 
3535 for (i = 0; i < vf->num_sbs; i++)
3536 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3537 vf->igu_sbs[i],
3538 vf->opaque_fid, false);
3540 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3541 sizeof(struct pfvf_def_resp_tlv),
3542 PFVF_STATUS_SUCCESS);
3545 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3546 struct ecore_ptt *p_ptt,
3547 struct ecore_vf_info *vf)
3549 u16 length = sizeof(struct pfvf_def_resp_tlv);
3550 u8 status = PFVF_STATUS_SUCCESS;
3552 /* Disable Interrupts for VF */
3553 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3555 /* Reset Permission table */
3556 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3558 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3559 length, status);
3560 }
3562 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3563 struct ecore_ptt *p_ptt,
3564 struct ecore_vf_info *p_vf)
3566 u16 length = sizeof(struct pfvf_def_resp_tlv);
3567 u8 status = PFVF_STATUS_SUCCESS;
3568 enum _ecore_status_t rc = ECORE_SUCCESS;
3570 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3572 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3573 /* Stopping the VF */
3574 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3575 p_vf->opaque_fid);
3577 if (rc != ECORE_SUCCESS) {
3578 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3579 rc);
3580 status = PFVF_STATUS_FAILURE;
3581 }
3583 p_vf->state = VF_STOPPED;
3584 }
3586 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3587 length, status);
3588 }
3590 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3591 struct ecore_ptt *p_ptt,
3592 struct ecore_vf_info *p_vf)
3594 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3595 struct pfvf_read_coal_resp_tlv *p_resp;
3596 struct vfpf_read_coal_req_tlv *req;
3597 u8 status = PFVF_STATUS_FAILURE;
3598 struct ecore_vf_queue *p_queue;
3599 struct ecore_queue_cid *p_cid;
3600 enum _ecore_status_t rc = ECORE_SUCCESS;
3601 u16 coal = 0, qid, i;
3602 bool b_is_rx;
3604 mbx->offset = (u8 *)mbx->reply_virt;
3605 req = &mbx->req_virt->read_coal_req;
3606 qid = req->qid;
3608 b_is_rx = req->is_rx ? true : false;
3609 
3610 if (b_is_rx) {
3611 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3612 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3613 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3614 "VF[%d]: Invalid Rx queue_id = %d\n",
3615 p_vf->abs_vf_id, qid);
3616 goto send_resp;
3617 }
3619 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3620 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3621 if (rc != ECORE_SUCCESS)
3622 goto send_resp;
3623 } else {
3624 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3625 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3626 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3627 "VF[%d]: Invalid Tx queue_id = %d\n",
3628 p_vf->abs_vf_id, qid);
3629 goto send_resp;
3630 }
3631 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3632 p_queue = &p_vf->vf_queues[qid];
3633 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3634 (!p_queue->cids[i].b_is_tx))
3635 continue;
3637 p_cid = p_queue->cids[i].p_cid;
3639 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3640 p_cid, &coal);
3641 if (rc != ECORE_SUCCESS)
3642 goto send_resp;
3643 break;
3644 }
3645 }
3647 status = PFVF_STATUS_SUCCESS;
3649 send_resp:
3650 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3651 sizeof(*p_resp));
3652 p_resp->coal = coal;
3654 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3655 sizeof(struct channel_list_end_tlv));
3657 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3660 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3661 struct ecore_ptt *p_ptt,
3662 struct ecore_vf_info *vf)
3664 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3665 enum _ecore_status_t rc = ECORE_SUCCESS;
3666 struct vfpf_update_coalesce *req;
3667 u8 status = PFVF_STATUS_FAILURE;
3668 struct ecore_queue_cid *p_cid;
3669 u16 rx_coal, tx_coal;
3670 u16 qid;
3671 int i;
3673 req = &mbx->req_virt->update_coalesce;
3675 rx_coal = req->rx_coal;
3676 tx_coal = req->tx_coal;
3677 qid = req->qid;
3679 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3680 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3681 rx_coal) {
3682 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3683 vf->abs_vf_id, qid);
3684 goto out;
3685 }
3687 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3688 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3689 tx_coal) {
3690 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3691 vf->abs_vf_id, qid);
3692 goto out;
3693 }
3695 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3696 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3697 vf->abs_vf_id, rx_coal, tx_coal, qid);
3699 if (rx_coal) {
3700 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3702 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3703 if (rc != ECORE_SUCCESS) {
3704 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3705 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3706 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3707 goto out;
3708 }
3709 vf->rx_coal = rx_coal;
3710 }
3712 /* TODO - in future, it might be possible to pass this in a per-cid
3713 * granularity. For now, do this for all Tx queues.
3714 */
3715 if (tx_coal) {
3716 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3718 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3719 if (p_queue->cids[i].p_cid == OSAL_NULL)
3720 continue;
3722 if (!p_queue->cids[i].b_is_tx)
3723 continue;
3725 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3726 p_queue->cids[i].p_cid);
3727 if (rc != ECORE_SUCCESS) {
3728 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3729 "VF[%d]: Unable to set tx queue coalesce\n",
3734 vf->tx_coal = tx_coal;
3737 status = PFVF_STATUS_SUCCESS;
3738 out:
3739 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3740 sizeof(struct pfvf_def_resp_tlv), status);
3743 enum _ecore_status_t
3744 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3745 u16 rx_coal, u16 tx_coal,
3746 u16 vf_id, u16 qid)
3747 {
3748 struct ecore_queue_cid *p_cid;
3749 struct ecore_vf_info *vf;
3750 struct ecore_ptt *p_ptt;
3751 enum _ecore_status_t rc = ECORE_SUCCESS;
3752 int i;
3753 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3754 DP_NOTICE(p_hwfn, true,
3755 "VF[%d] - Can not set coalescing: VF is not active\n",
3760 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3761 p_ptt = ecore_ptt_acquire(p_hwfn);
3762 if (!p_ptt)
3763 return ECORE_AGAIN;
3765 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3766 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3767 rx_coal) {
3768 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3769 vf->abs_vf_id, qid);
3770 goto out;
3771 }
3773 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3774 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3775 tx_coal) {
3776 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3777 vf->abs_vf_id, qid);
3778 goto out;
3779 }
3781 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3782 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3783 vf->abs_vf_id, rx_coal, tx_coal, qid);
3785 if (rx_coal) {
3786 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3788 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3789 if (rc != ECORE_SUCCESS) {
3790 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3791 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3792 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3793 goto out;
3794 }
3795 vf->rx_coal = rx_coal;
3796 }
3798 /* TODO - in future, it might be possible to pass this in a per-cid
3799 * granularity. For now, do this for all Tx queues.
3800 */
3801 if (tx_coal) {
3802 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3804 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3805 if (p_queue->cids[i].p_cid == OSAL_NULL)
3806 continue;
3808 if (!p_queue->cids[i].b_is_tx)
3809 continue;
3811 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3812 p_queue->cids[i].p_cid);
3813 if (rc != ECORE_SUCCESS) {
3814 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3815 "VF[%d]: Unable to set tx queue coalesce\n",
3820 vf->tx_coal = tx_coal;
3824 ecore_ptt_release(p_hwfn, p_ptt);
3829 static enum _ecore_status_t
3830 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3831 struct ecore_vf_info *p_vf,
3832 struct ecore_ptt *p_ptt)
3833 {
3834 int cnt;
3835 u32 val;
3837 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3839 for (cnt = 0; cnt < 50; cnt++) {
3840 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3841 if (!val)
3842 break;
3843 OSAL_MSLEEP(20);
3844 }
3845 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3847 if (cnt == 50) {
3848 DP_ERR(p_hwfn, "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3849 p_vf->abs_vf_id, val);
3850 return ECORE_TIMEOUT;
3851 }
3853 return ECORE_SUCCESS;
3856 static enum _ecore_status_t
3857 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3858 struct ecore_vf_info *p_vf,
3859 struct ecore_ptt *p_ptt)
3860 {
3861 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3862 int i, cnt;
3864 /* Read initial consumers & producers */
3865 for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3866 u32 prod;
3867 
3868 cons[i] = ecore_rd(p_hwfn, p_ptt,
3869 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3870 i * 0x40);
3871 prod = ecore_rd(p_hwfn, p_ptt,
3872 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3873 i * 0x40);
3874 distance[i] = prod - cons[i];
3875 }
3877 /* Wait for consumers to pass the producers */
3878 i = 0;
3879 for (cnt = 0; cnt < 50; cnt++) {
3880 for (; i < MAX_NUM_VOQS_E4; i++) {
3881 u32 tmp;
3882 
3883 tmp = ecore_rd(p_hwfn, p_ptt,
3884 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3885 i * 0x40);
3886 if (distance[i] > tmp - cons[i])
3887 break;
3888 }
3889 
3890 if (i == MAX_NUM_VOQS_E4)
3891 break;
3892 
3893 OSAL_MSLEEP(20);
3894 }
3895 
3896 if (cnt == 50) {
3897 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3898 p_vf->abs_vf_id, i);
3899 return ECORE_TIMEOUT;
3900 }
3902 return ECORE_SUCCESS;
3903 }
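/* Illustrative sketch, not driver code: the PBF poll above relies on
 * unsigned wraparound arithmetic. 'prod - cons' and 'current - cons' stay
 * correct even if the 32-bit counters wrap, so the loop only exits once a
 * consumer has advanced by at least the initial distance. The counter
 * values below are made up for the example.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cons = 0xfffffff0u;	/* about to wrap */
	uint32_t prod = 0x00000010u;	/* already wrapped */
	uint32_t distance = prod - cons;	/* 0x20, despite the wrap */
	uint32_t now = 0x00000008u;	/* consumer re-read later */

	if (distance > now - cons)
		printf("still draining (moved 0x%x of 0x%x)\n",
		       (unsigned)(now - cons), (unsigned)distance);
	else
		printf("consumer passed the producer snapshot\n");
	return 0;
}
#endif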
3905 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3906 struct ecore_vf_info *p_vf,
3907 struct ecore_ptt *p_ptt)
3909 enum _ecore_status_t rc;
3911 /* TODO - add SRC and TM polling once we add storage IOV */
3913 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3914 if (rc != ECORE_SUCCESS)
3915 return rc;
3916 
3917 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3918 if (rc != ECORE_SUCCESS)
3919 return rc;
3920 
3921 return ECORE_SUCCESS;
3924 static enum _ecore_status_t
3925 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3926 struct ecore_ptt *p_ptt,
3927 u16 rel_vf_id, u32 *ack_vfs)
3928 {
3930 struct ecore_vf_info *p_vf;
3931 enum _ecore_status_t rc = ECORE_SUCCESS;
3933 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3934 if (!p_vf)
3935 return ECORE_SUCCESS;
3937 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3938 (1ULL << (rel_vf_id % 64))) {
3939 u16 vfid = p_vf->abs_vf_id;
3941 /* TODO - should we lock channel? */
3943 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3944 "VF[%d] - Handling FLR\n", vfid);
3946 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3948 /* If VF isn't active, no need for anything but SW */
3949 if (!p_vf->b_init)
3950 goto cleanup;
3952 /* TODO - what to do in case of failure? */
3953 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3954 if (rc != ECORE_SUCCESS)
3955 return rc;
3957 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3958 if (rc != ECORE_SUCCESS) {
3959 /* TODO - what now? What a mess... */
3960 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n",
3961 vfid);
3962 return rc;
3963 }
3965 /* Workaround to make VF-PF channel ready, as FW
3966 * doesn't do that as a part of FLR.
3967 */
3968 REG_WR(p_hwfn,
3969 GTT_BAR0_MAP_REG_USDM_RAM +
3970 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3972 /* VF_STOPPED has to be set only after final cleanup
3973 * but prior to re-enabling the VF.
3974 */
3975 p_vf->state = VF_STOPPED;
3977 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3978 if (rc != ECORE_SUCCESS) {
3979 /* TODO - again, a mess... */
3980 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3981 vfid);
3982 return rc;
3983 }
3984 cleanup:
3985 /* Mark VF for ack and clean pending state */
3986 if (p_vf->state == VF_RESET)
3987 p_vf->state = VF_STOPPED;
3988 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3989 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3990 ~(1ULL << (rel_vf_id % 64));
3991 p_vf->vf_mbx.b_pending_msg = false;
3992 }
3993 
3994 return rc;
3995 }
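/* Illustrative sketch, not driver code: pending FLRs are tracked in a u64
 * bitmap indexed by relative VF id, while the MFW ack uses a u32 bitmap
 * indexed by absolute VF id; the same / and % indexing appears in the
 * cleanup above. Array sizes and ids below are assumptions for the sketch.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pending_flr[4] = { 0 };	/* 256 VFs / 64 */
	uint32_t ack_vfs[8] = { 0 };		/* 256 VFs / 32 */
	uint16_t rel_vf_id = 70, abs_vfid = 86;

	pending_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
	ack_vfs[abs_vfid / 32] |= 1u << (abs_vfid % 32);

	/* clearing after handling the FLR */
	pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64));

	printf("ack word %u = 0x%08x\n", (unsigned)(abs_vfid / 32),
	       (unsigned)ack_vfs[abs_vfid / 32]);
	return 0;
}
#endif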
3997 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3998 struct ecore_ptt *p_ptt)
4001 u32 ack_vfs[VF_MAX_STATIC / 32];
4002 enum _ecore_status_t rc = ECORE_SUCCESS;
4003 u16 i;
4005 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
4007 /* Since BRB <-> PRS interface can't be tested as part of the flr
4008 * polling due to HW limitations, simply sleep a bit. And since
4009 * there's no need to wait per-vf, do it before looping.
4010 */
4011 OSAL_MSLEEP(100);
4013 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
4014 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
4016 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4017 
4018 return rc;
4019 }
4020 #ifndef LINUX_REMOVE
4021 enum _ecore_status_t
4022 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4023 struct ecore_ptt *p_ptt,
4024 u16 rel_vf_id)
4025 {
4027 u32 ack_vfs[VF_MAX_STATIC / 32];
4028 enum _ecore_status_t rc = ECORE_SUCCESS;
4030 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
4032 /* Wait instead of polling the BRB <-> PRS interface */
4033 OSAL_MSLEEP(100);
4035 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
4037 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4038 
4039 return rc;
4040 }
4041 #endif
4042 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
4043 u32 *p_disabled_vfs)
4044 {
4045 bool found = false;
4046 u16 i;
4047 
4048 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
4049 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
4050 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4051 "[%08x,...,%08x]: %08x\n",
4052 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
4054 if (!p_hwfn->p_dev->p_iov_info) {
4055 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
4060 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
4061 struct ecore_vf_info *p_vf;
4062 u8 vfid;
4063 
4064 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
4065 if (!p_vf)
4066 continue;
4067 
4068 vfid = p_vf->abs_vf_id;
4069 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
4070 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
4071 u16 rel_vf_id = p_vf->relative_vf_id;
4073 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4074 "VF[%d] [rel %d] got FLR-ed\n",
4077 p_vf->state = VF_RESET;
4079 /* No need to lock here, since pending_flr should
4080 * only change here and before ACKing MFw. Since
4081 * MFW will not trigger an additional attention for
4082 * VF flr until ACKs, we're safe.
4083 */
4084 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
4085 found = true;
4086 }
4087 }
4088 
4089 return found;
4090 }
4092 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
4093 u16 vfid,
4094 struct ecore_mcp_link_params *p_params,
4095 struct ecore_mcp_link_state *p_link,
4096 struct ecore_mcp_link_capabilities *p_caps)
4098 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
4099 struct ecore_bulletin_content *p_bulletin;
4100 
4101 if (!p_vf)
4102 return;
4103 
4104 p_bulletin = p_vf->bulletin.p_virt;
4105 
4106 if (p_params)
4107 __ecore_vf_get_link_params(p_params, p_bulletin);
4108 if (p_link)
4109 __ecore_vf_get_link_state(p_link, p_bulletin);
4110 if (p_caps)
4111 __ecore_vf_get_link_caps(p_caps, p_bulletin);
4112 }
4114 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
4115 struct ecore_ptt *p_ptt,
4116 int vfid)
4117 {
4118 struct ecore_iov_vf_mbx *mbx;
4119 struct ecore_vf_info *p_vf;
4121 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4122 if (!p_vf)
4123 return;
4124 
4125 mbx = &p_vf->vf_mbx;
4127 /* ecore_iov_process_mbx_request */
4128 #ifndef CONFIG_ECORE_SW_CHANNEL
4129 if (!mbx->b_pending_msg) {
4130 DP_NOTICE(p_hwfn, true,
4131 "VF[%02x]: Trying to process mailbox message when none is pending\n",
4135 mbx->b_pending_msg = false;
4138 mbx->first_tlv = mbx->req_virt->first_tlv;
4140 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4141 "VF[%02x]: Processing mailbox message [type %04x]\n",
4142 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4144 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
4145 p_vf->relative_vf_id,
4146 mbx->first_tlv.tl.type);
4148 /* Lock the per vf op mutex and note the locker's identity.
4149 * The unlock will take place in mbx response.
4150 */
4151 ecore_iov_lock_vf_pf_channel(p_hwfn, p_vf,
4152 mbx->first_tlv.tl.type);
4154 /* check if tlv type is known */
4155 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
4156 !p_vf->b_malicious) {
4157 /* switch on the opcode */
4158 switch (mbx->first_tlv.tl.type) {
4159 case CHANNEL_TLV_ACQUIRE:
4160 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
4161 break;
4162 case CHANNEL_TLV_VPORT_START:
4163 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
4164 break;
4165 case CHANNEL_TLV_VPORT_TEARDOWN:
4166 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
4167 break;
4168 case CHANNEL_TLV_START_RXQ:
4169 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
4170 break;
4171 case CHANNEL_TLV_START_TXQ:
4172 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
4173 break;
4174 case CHANNEL_TLV_STOP_RXQS:
4175 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
4176 break;
4177 case CHANNEL_TLV_STOP_TXQS:
4178 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
4179 break;
4180 case CHANNEL_TLV_UPDATE_RXQ:
4181 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
4182 break;
4183 case CHANNEL_TLV_VPORT_UPDATE:
4184 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
4185 break;
4186 case CHANNEL_TLV_UCAST_FILTER:
4187 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
4188 break;
4189 case CHANNEL_TLV_CLOSE:
4190 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
4191 break;
4192 case CHANNEL_TLV_INT_CLEANUP:
4193 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
4194 break;
4195 case CHANNEL_TLV_RELEASE:
4196 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
4197 break;
4198 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
4199 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
4200 break;
4201 case CHANNEL_TLV_COALESCE_UPDATE:
4202 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
4203 break;
4204 case CHANNEL_TLV_COALESCE_READ:
4205 ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
4206 break;
4207 }
4208 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
4209 /* If we've received a message from a VF we consider malicious
4210 * we ignore the messasge unless it's one for RELEASE, in which
4211 * case we'll let it have the benefit of doubt, allowing the
4212 * next loaded driver to start again.
4214 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
4215 /* TODO - initiate FLR, remove malicious indication */
4216 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4217 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
4220 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4221 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
4222 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4225 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4226 mbx->first_tlv.tl.type,
4227 sizeof(struct pfvf_def_resp_tlv),
4228 PFVF_STATUS_MALICIOUS);
4230 /* unknown TLV - this may belong to a VF driver from the future
4231 * - a version written after this PF driver was written, which
4232 * supports features unknown as of yet. Too bad since we don't
4233 * support them. Or this may be because someone wrote a crappy
4234 * VF driver and is sending garbage over the channel.
4236 DP_NOTICE(p_hwfn, false,
4237 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
4239 mbx->first_tlv.tl.type,
4240 mbx->first_tlv.tl.length,
4241 mbx->first_tlv.padding,
4242 (unsigned long long)mbx->first_tlv.reply_address);
4244 /* Try replying in case reply address matches the acquisition's
4247 if (p_vf->acquire.first_tlv.reply_address &&
4248 (mbx->first_tlv.reply_address ==
4249 p_vf->acquire.first_tlv.reply_address))
4250 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4251 mbx->first_tlv.tl.type,
4252 sizeof(struct pfvf_def_resp_tlv),
4253 PFVF_STATUS_NOT_SUPPORTED);
4255 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4256 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
4260 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
4261 mbx->first_tlv.tl.type);
4263 #ifdef CONFIG_ECORE_SW_CHANNEL
4264 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
4265 mbx->sw_mbx.response_offset = 0;
void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
				     u64 *events)
{
	u16 i;

	OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);

	ecore_for_each_vf(p_hwfn, i) {
		struct ecore_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

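/* Illustrative sketch (not part of the driver; the caller's p_ptt and
 * scheduling context are assumed): OS glue that owns the PF's IOV work
 * item might pair the bitmap above with ecore_iov_process_mbx_req()
 * roughly like this:
 *
 *	u64 events[ECORE_VF_ARRAY_LENGTH];
 *	u16 i;
 *
 *	ecore_iov_pf_get_pending_events(p_hwfn, events);
 *	ecore_for_each_vf(p_hwfn, i) {
 *		if (events[i / 64] & (1ULL << (i % 64)))
 *			ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
 *	}
 */
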
static struct ecore_vf_info *
ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return OSAL_NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}

static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 abs_vfid,
						 struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
								   abs_vfid);

	if (!p_vf)
		return ECORE_SUCCESS;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	p_vf->vf_mbx.b_pending_msg = true;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}

static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
				       struct malicious_vf_eqe_data *p_data)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn, false,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}

	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}

static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 OSAL_UNUSED fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	case COMMON_EVENT_MALICIOUS_VF:
		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}

#ifndef LINUX_REMOVE
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
				 u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}
#endif

u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return MAX_NUM_VFS_E4;
}

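/* Illustrative note (an assumption, not confirmed by this file): the
 * ecore_for_each_vf() macro used elsewhere in this file is presumably
 * built on the iterator above, along the lines of:
 *
 *	#define ecore_for_each_vf(_p_hwfn, _i)				\
 *		for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0);	\
 *		     _i < MAX_NUM_VFS_E4;				\
 *		     _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
 */
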
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt,
					   int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC |
		       ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4,
				 &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n",
			   vfid);

		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}

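/* Illustrative note (an assumption about the DMAE API, not stated in
 * this file): ecore_dmae_host2host() is expected to take its length in
 * 32-bit dwords, which is why the mailbox copy above passes
 * sizeof(union vfpf_tlvs) / 4 rather than the raw byte count.
 */
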
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

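/* Illustrative note (added for clarity): MAC_ADDR_FORCED and
 * VFPF_BULLETIN_MAC_ADDR are mutually exclusive bulletin bits. Forcing
 * a MAC above sets the former and clears the latter, so e.g. after
 *
 *	ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, vfid);
 *
 * a later ecore_iov_bulletin_set_mac() call is rejected until the
 * forced address is removed.
 */
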
#ifndef LINUX_REMOVE
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n", vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only,
					       int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] - VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
		  (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
									  : 0;

	return ECORE_SUCCESS;
}

void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}
#endif

void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced VLAN, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced vlan to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

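/* Illustrative usage (hypothetical values): forcing PVID 100 publishes
 * VLAN_ADDR_FORCED in the VF's bulletin, while a subsequent call with
 * pvid == 0 clears the bit and removes the forced VLAN:
 *
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 100, vfid);
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 0, vfid);
 */
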
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port, u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

#ifndef LINUX_REMOVE
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
						: ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}

void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}

#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *
ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
			u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif

bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}
#endif

u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
				      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_mcp_link_state *p_link;
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
				   p_link->speed);
}

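/* Illustrative note (an assumption about units, not stated in this
 * file): 'val' appears to be the VF's maximum Tx rate in Mb/s; it is
 * programmed as a vport rate limit, with the current link speed passed
 * in so the limiter can be scaled against it.
 */
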
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate)
{
	struct ecore_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn, true,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return ECORE_INVAL;
		}
	}

	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
	if (!vf) /* defensive check; avoids a NULL dereference below */
		return ECORE_INVAL;

	vport_id = vf->vport_id;

	return ecore_configure_vport_wfq(p_dev, vport_id, rate);
}

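/* Illustrative note (added for clarity): the two rate APIs differ in
 * mechanism - ecore_iov_configure_tx_rate() programs a per-vport rate
 * *limit* (a ceiling), while this function configures WFQ arbitration
 * to guarantee a *minimum* bandwidth share for the VF's vport.
 */
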
#ifndef LINUX_REMOVE
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}
#endif

u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
				    u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
			   u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
			    u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}

bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
				      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
				 u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}

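/* Summary of the state predicates above (descriptive comment added for
 * clarity): VF_FREE - not yet acquired; VF_ACQUIRED - acquired over the
 * channel but not yet initialized; VF_ENABLED - fully initialized; and
 * a VF counts as "started" in any state other than VF_FREE and
 * VF_STOPPED.
 */
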
#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}
#endif

#ifdef CONFIG_ECORE_SW_CHANNEL
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
				 bool b_is_hw)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	vf_info->b_hw_channel = b_is_hw;
}
#endif