2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
32 #ifndef __ECORE_SRIOV_API_H__
33 #define __ECORE_SRIOV_API_H__
35 #include "common_hsi.h"
36 #include "ecore_status.h"
38 #define ECORE_ETH_VF_NUM_MAC_FILTERS 1
39 #define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
40 #define ECORE_VF_ARRAY_LENGTH (3)
42 #define IS_VF(p_dev) ((p_dev)->b_is_vf)
43 #define IS_PF(p_dev) (!((p_dev)->b_is_vf))
44 #ifdef CONFIG_ECORE_SRIOV
45 #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->p_iov_info))
47 #define IS_PF_SRIOV(p_hwfn) (0)
49 #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
50 #define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
52 /* @@@ TBD MichalK - what should this number be*/
53 #define ECORE_MAX_VF_CHAINS_PER_PF 16
/* Vport update extended feature TLV flags.
 * Each enumerator is the flag-bit index identifying one extended TLV that
 * may be carried in a vport-update message on the vf-pf channel.
 */
enum ecore_iov_vport_update_flag {
	ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
	ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
	ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
	ECORE_IOV_VP_UPDATE_MCAST = 3,
	ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
	ECORE_IOV_VP_UPDATE_RSS = 5,
	ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
	ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
	ECORE_IOV_VP_UPDATE_MAX = 8,	/* number of update flag types */
/* PF to VF STATUS is part of the vfpf-channel API and must remain forward
 * compatible: never renumber or remove existing values, only append.
 */
enum ecore_iov_pf_to_vf_status {
	PFVF_STATUS_WAITING = 0,	/* request not yet processed by PF */
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_MALICIOUS,
80 struct ecore_mcp_link_params;
81 struct ecore_mcp_link_state;
82 struct ecore_mcp_link_capabilities;
84 /* These defines are used by the hw-channel; should never change order */
85 #define VFPF_ACQUIRE_OS_LINUX (0)
86 #define VFPF_ACQUIRE_OS_WINDOWS (1)
87 #define VFPF_ACQUIRE_OS_ESX (2)
88 #define VFPF_ACQUIRE_OS_SOLARIS (3)
89 #define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
91 struct ecore_vf_acquire_sw_info {
96 struct ecore_public_vf_info {
97 /* These copies will later be reflected in the bulletin board,
98 * but this copy should be newer.
100 u8 forced_mac[ETH_ALEN];
104 struct ecore_iov_vf_init_params {
107 /* Number of requested Queues; Currently, don't support different
108 * number of Rx/Tx queues.
110 /* TODO - remove this limitation */
113 /* Allow the client to choose which qzones to use for Rx/Tx,
114 * and which queue_base to use for Tx queues on a per-queue basis.
115 * Notice values should be relative to the PF resources.
117 u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
118 u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
122 /* Should be set in case RSS is going to be used for VF */
126 #ifdef CONFIG_ECORE_SW_CHANNEL
127 /* This is SW channel related only... */
129 VF_PF_UNKNOWN_STATE = 0,
130 VF_PF_WAIT_FOR_START_REQUEST = 1,
131 VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
132 VF_PF_REQUEST_IN_PROCESSING = 3,
133 VF_PF_RESPONSE_READY = 4,
136 struct ecore_iov_sw_mbx {
137 enum mbx_state mbx_state;
147 * @brief Get the vf sw mailbox params
152 * @return struct ecore_iov_sw_mbx*
154 struct ecore_iov_sw_mbx*
155 ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
/* This struct is part of ecore_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct ecore_hw_sriov_info {
	/* standard SRIOV capability fields, mostly for debugging;
	 * mirrors the PCIe SR-IOV extended capability registers.
	 */
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of vfs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */
181 #ifdef CONFIG_ECORE_SRIOV
183 * @brief mark/clear all VFs before/after an incoming PCIe sriov
189 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
193 * @brief mark/clear chosen VF before/after an incoming PCIe
200 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
205 * @brief ecore_iov_init_hw_for_vf - initialize the HW for
206 * enabling access of a VF. Also includes preparing the
207 * IGU for VF access. This needs to be called AFTER hw is
208 * initialized and BEFORE VF is loaded inside the VM.
214 * @return enum _ecore_status_t
217 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
218 struct ecore_ptt *p_ptt,
219 struct ecore_iov_vf_init_params *p_params);
222 * @brief ecore_iov_process_mbx_req - process a request received
229 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
230 struct ecore_ptt *p_ptt,
234 * @brief ecore_iov_release_hw_for_vf - called once upper layer
235 * knows VF is done with - can release any resources
236 * allocated for VF at this point. this must be done once
237 * we know VF is no longer loaded in VM.
243 * @return enum _ecore_status_t
245 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
246 struct ecore_ptt *p_ptt,
250 * @brief ecore_iov_set_vf_ctx - set a context for a given VF
256 * @return enum _ecore_status_t
258 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
263 * @brief FLR cleanup for all VFs
268 * @return enum _ecore_status_t
270 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
271 struct ecore_ptt *p_ptt);
274 * @brief FLR cleanup for single VF
280 * @return enum _ecore_status_t
283 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
284 struct ecore_ptt *p_ptt,
288 * @brief Update the bulletin with link information. Notice this does NOT
289 * send a bulletin update, only updates the PF's bulletin.
293 * @param params - the link params to use for the VF link configuration
294 * @param link - the link output to use for the VF link configuration
295 * @param p_caps - the link default capabilities.
297 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
299 struct ecore_mcp_link_params *params,
300 struct ecore_mcp_link_state *link,
301 struct ecore_mcp_link_capabilities *p_caps);
304 * @brief Returns link information as perceived by VF.
308 * @param p_params - the link params visible to vf.
309 * @param p_link - the link state visible to vf.
310 * @param p_caps - the link default capabilities visible to vf.
312 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
314 struct ecore_mcp_link_params *params,
315 struct ecore_mcp_link_state *link,
316 struct ecore_mcp_link_capabilities *p_caps);
319 * @brief return if the VF is pending FLR
326 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
330 * @brief Check if given VF ID @vfid is valid
331 * w.r.t. @b_enabled_only value
332 * if b_enabled_only = true - only enabled VF id is valid
333 * else any VF id less than max_vfs is valid
336 * @param rel_vf_id - Relative VF ID
337 * @param b_enabled_only - consider only enabled VF
338 * @param b_non_malicious - true iff we want to validate vf isn't malicious.
340 * @return bool - true for valid VF ID
342 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
344 bool b_enabled_only, bool b_non_malicious);
347 * @brief Get VF's public info structure
350 * @param vfid - Relative VF ID
351 * @param b_enabled_only - false if want to access even if vf is disabled
353 * @return struct ecore_public_vf_info *
355 struct ecore_public_vf_info*
356 ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
357 u16 vfid, bool b_enabled_only);
360 * @brief fills a bitmask of all VFs which have pending unhandled
365 void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
369 * @brief Copy VF's message to PF's buffer
375 * @return enum _ecore_status_t
377 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
378 struct ecore_ptt *ptt,
381 * @brief Set forced MAC address in PFs copy of bulletin board
382 * and configures FW/HW to support the configuration.
388 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
392 * @brief Set MAC address in PFs copy of bulletin board without
399 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
403 * @brief Set default behaviour of VF in case no vlans are configured for it
404 * whether to accept only untagged traffic or all.
405 * Must be called prior to the VF vport-start.
408 * @param b_untagged_only
411 * @return ECORE_SUCCESS if configuration would stick.
414 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
415 bool b_untagged_only,
419 * @brief Get VFs opaque fid.
425 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
429 * @brief Set forced VLAN [pvid] in PFs copy of bulletin board
430 * and configures FW/HW to support the configuration.
431 * Setting of pvid 0 would clear the feature.
436 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
440 * @brief Check if VF has VPORT instance. This can be used
441 * to check if VPORT is active.
445 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
448 * @brief PF posts the bulletin to the VF
454 * @return enum _ecore_status_t
456 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
458 struct ecore_ptt *p_ptt);
461 * @brief Check if given VF (@vfid) is marked as stopped
466 * @return bool : true if stopped
468 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
471 * @brief Configure VF anti spoofing
475 * @param val - spoofchk value - true/false
477 * @return enum _ecore_status_t
479 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
483 * @brief Get VF's configured spoof value.
488 * @return bool - spoofchk value - true/false
490 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
493 * @brief Check for SRIOV sanity by PF.
498 * @return bool - true if sanity checks passes, else false
500 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
503 * @brief Get the num of VF chains.
509 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
512 * @brief Get vf request mailbox params
516 * @param pp_req_virt_addr
517 * @param p_req_virt_size
519 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
521 void **pp_req_virt_addr,
522 u16 *p_req_virt_size);
525 * @brief Get vf mailbox params
529 * @param pp_reply_virt_addr
530 * @param p_reply_virt_size
532 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
534 void **pp_reply_virt_addr,
535 u16 *p_reply_virt_size);
538 * @brief Validate if the given length is a valid vfpf message
545 bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
548 * @brief Return the max pfvf message length
552 u32 ecore_iov_pfvf_msg_length(void);
555 * @brief Returns forced MAC address if one is configured
560 * @return OSAL_NULL if mac isn't forced; Otherwise, returns MAC.
562 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
566 * @brief Returns pvid if one is configured
571 * @return 0 if no pvid is configured, otherwise the pvid.
573 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
576 * @brief Configure VFs tx rate
581 * @param val - tx rate value in Mb/sec.
583 * @return enum _ecore_status_t
585 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
586 struct ecore_ptt *p_ptt,
590 * @brief - Retrieves the statistics associated with a VF
595 * @param p_stats - this will be filled with the VF statistics
597 * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
599 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
600 struct ecore_ptt *p_ptt,
602 struct ecore_eth_stats *p_stats);
605 * @brief - Retrieves num of rxqs chains
610 * @return num of rxqs chains.
612 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
616 * @brief - Retrieves num of active rxqs chains
623 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
627 * @brief - Retrieves ctx pointer
634 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
638 * @brief - Retrieves VF`s num sbs
645 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
* @brief - Return true if VF is waiting for acquire
656 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
* @brief - Return true if VF is acquired but not initialized
667 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
* @brief - Return true if VF is acquired and initialized
678 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
* @brief - Return true if VF has started in FW
689 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
693 * @brief - Get VF's vport min rate configured.
697 * @return - rate in Mbps
699 int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
702 * @brief - Configure min rate for VF's vport.
705 * @param - rate in Mbps
709 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
* @brief ecore_iov_pf_configure_vf_queue_coalesce - PF configure coalesce parameters
715 * of VFs for Rx and Tx queue.
716 * While the API allows setting coalescing per-qid, all queues sharing a SB
717 * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
718 * otherwise configuration would break.
721 * @param rx_coal - Rx Coalesce value in micro seconds.
722 * @param tx_coal - TX Coalesce value in micro seconds.
729 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
730 u16 rx_coal, u16 tx_coal,
734 * @brief - Given a VF index, return index of next [including that] active VF.
739 * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
741 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
742 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
743 u16 vxlan_port, u16 geneve_port);
/* NOTE(review): these appear to be the !CONFIG_ECORE_SRIOV stub
 * implementations (the #else separating them from the prototypes above is
 * not visible in this chunk -- verify against the full header).  They keep
 * callers compiling when PF-side SR-IOV support is configured out: void
 * APIs are no-ops and status-returning APIs report ECORE_INVAL.
 */
static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev, u8 to_disable) {}
static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev, u16 rel_vf_id, u8 to_disable) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_iov_vf_init_params *p_params) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid) {}
/* Releasing resources that were never allocated is trivially successful */
static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id) {return ECORE_SUCCESS;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, u16 vf_id, void *ctx) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid, struct ecore_mcp_link_params *params, struct ecore_mcp_link_state *link, struct ecore_mcp_link_capabilities *p_caps) {}
static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid, struct ecore_mcp_link_params *params, struct ecore_mcp_link_state *link, struct ecore_mcp_link_capabilities *p_caps) {}
static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
756 static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only) {return false;}
static OSAL_INLINE struct ecore_public_vf_info* ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid, bool b_enabled_only) {return OSAL_NULL;}
/* NOTE(review): no prototypes named ecore_iov_pf_add_pending_events /
 * ecore_iov_pf_get_and_clear_pending_events are visible above -- the
 * SRIOV-enabled declaration is ecore_iov_pf_get_pending_events.  Verify
 * these stub names match the CONFIG_ECORE_SRIOV side of the header.
 */
static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid) {}
static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn, u64 *events) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, struct ecore_ptt *ptt, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, bool b_untagged_only, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, u16 *opaque_fid) {}
765 static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn p_hwfn, u16 pvid, int vfid) {}
/* Predicate stubs return false, counters return 0, lookups return OSAL_NULL,
 * and configuration calls report ECORE_INVAL (no SR-IOV support compiled in).
 */
static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn, int vfid, struct ecore_ptt *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, int vfid, bool val) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn) {return 0;}
static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, u16 rel_vf_id, void **pp_req_virt_addr, u16 *p_req_virt_size) {}
static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn, u16 rel_vf_id, void **pp_reply_virt_addr, u16 *p_reply_virt_size) {}
static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length) {return false;}
static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void) {return 0;}
static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid, int val) { return ECORE_INVAL; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid, struct ecore_eth_stats *p_stats) { return ECORE_INVAL; }
/* Per-VF query stubs: without SR-IOV support no VFs exist, so counts are 0,
 * the ctx lookup yields OSAL_NULL, and rate configuration is rejected.
 */
static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid) { return 0; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev, int vfid, u32 rate) { return ECORE_INVAL; }
static OSAL_INLINE void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port) { return; }
/* Returning E4_MAX_NUM_VFS makes ecore_for_each_vf() a zero-iteration loop */
static OSAL_INLINE u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) { return E4_MAX_NUM_VFS; }
/* Iterate _i over the relative ids of all active VFs of _p_hwfn.
 * The loop terminates when ecore_iov_get_next_active_vf() returns
 * E4_MAX_NUM_VFS (no further active VFs).
 * Note: _i is expanded multiple times -- pass a plain lvalue with no
 * side effects.
 */
#define ecore_for_each_vf(_p_hwfn, _i) \
	for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < E4_MAX_NUM_VFS; \
	     _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))