2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
31 #ifndef __ECORE_L2_API_H__
32 #define __ECORE_L2_API_H__
34 #include "ecore_status.h"
35 #include "ecore_sp_api.h"
36 #include "ecore_int_api.h"
41 ECORE_RSS_IPV4_TCP = 0x4,
42 ECORE_RSS_IPV6_TCP = 0x8,
43 ECORE_RSS_IPV4_UDP = 0x10,
44 ECORE_RSS_IPV6_UDP = 0x20,
47 /* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
48 #define ECORE_RSS_IND_TABLE_SIZE 128
49 #define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
51 #define ECORE_MAX_PHC_DRIFT_PPB 291666666
53 enum ecore_ptp_filter_type {
55 ECORE_PTP_FILTER_IPV4,
56 ECORE_PTP_FILTER_IPV4_IPV6,
57 ECORE_PTP_FILTER_L2_IPV4_IPV6
60 struct ecore_queue_start_common_params {
61 /* Should always be relative to entity sending this. */
65 /* Relative, but relevant only for PFs */
68 struct ecore_sb_info *p_sb;
72 struct ecore_rxq_start_ret_params {
73 void OSAL_IOMEM *p_prod;
77 struct ecore_txq_start_ret_params {
78 void OSAL_IOMEM *p_doorbell;
82 struct ecore_rss_params {
86 u8 update_rss_capabilities;
87 u8 update_rss_ind_table;
90 u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
92 /* Indirection table consist of rx queue handles */
93 void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
94 u32 rss_key[ECORE_RSS_KEY_SIZE];
97 struct ecore_sge_tpa_params {
98 u8 max_buffers_per_cqe;
100 u8 update_tpa_en_flg;
103 u8 tpa_ipv4_tunn_en_flg;
104 u8 tpa_ipv6_tunn_en_flg;
106 u8 update_tpa_param_flg;
107 u8 tpa_pkt_split_flg;
108 u8 tpa_hdr_data_split_flg;
109 u8 tpa_gro_consistent_flg;
112 u16 tpa_min_size_to_start;
113 u16 tpa_min_size_to_cont;
116 enum ecore_filter_opcode {
120 ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
121 ECORE_FILTER_FLUSH, /* Removes all filters */
124 enum ecore_filter_ucast_type {
127 ECORE_FILTER_MAC_VLAN,
128 ECORE_FILTER_INNER_MAC,
129 ECORE_FILTER_INNER_VLAN,
130 ECORE_FILTER_INNER_PAIR,
131 ECORE_FILTER_INNER_MAC_VNI_PAIR,
132 ECORE_FILTER_MAC_VNI_PAIR,
136 struct ecore_filter_ucast {
137 enum ecore_filter_opcode opcode;
138 enum ecore_filter_ucast_type type;
142 u8 vport_to_remove_from;
143 unsigned char mac[ETH_ALEN];
149 struct ecore_filter_mcast {
150 /* MOVE is not supported for multicast */
151 enum ecore_filter_opcode opcode;
153 u8 vport_to_remove_from;
155 #define ECORE_MAX_MC_ADDRS 64
156 unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
159 struct ecore_filter_accept_flags {
160 u8 update_rx_mode_config;
161 u8 update_tx_mode_config;
164 #define ECORE_ACCEPT_NONE 0x01
165 #define ECORE_ACCEPT_UCAST_MATCHED 0x02
166 #define ECORE_ACCEPT_UCAST_UNMATCHED 0x04
167 #define ECORE_ACCEPT_MCAST_MATCHED 0x08
168 #define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
169 #define ECORE_ACCEPT_BCAST 0x20
172 struct ecore_arfs_config_params {
177 bool arfs_enable; /* Enable or disable arfs mode */
180 /* Add / remove / move / remove-all unicast MAC-VLAN filters.
181 * FW will assert in the following cases, so driver should take care...:
182 * 1. Adding a filter to a full table.
183 * 2. Adding a filter which already exists on that vport.
184 * 3. Removing a filter which doesn't exist.
188 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
189 struct ecore_filter_ucast *p_filter_cmd,
190 enum spq_mode comp_mode,
191 struct ecore_spq_comp_cb *p_comp_data);
193 /* Add / remove / move multicast MAC filters. */
195 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
196 struct ecore_filter_mcast *p_filter_cmd,
197 enum spq_mode comp_mode,
198 struct ecore_spq_comp_cb *p_comp_data);
200 /* Set "accept" filters */
202 ecore_filter_accept_cmd(
203 struct ecore_dev *p_dev,
205 struct ecore_filter_accept_flags accept_flags,
206 u8 update_accept_any_vlan,
208 enum spq_mode comp_mode,
209 struct ecore_spq_comp_cb *p_comp_data);
212 * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
214 * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
215 * the VPort ID is not currently initialized.
219 * @p_params Inputs; Relative for PF [SB being an exception]
220 * @param bd_max_bytes Maximum bytes that can be placed on a BD
221 * @param bd_chain_phys_addr Physical address of BDs for receive.
222 * @param cqe_pbl_addr Physical address of the CQE PBL Table.
223 * @param cqe_pbl_size Size of the CQE PBL Table
224 * @param p_ret_params Pointed struct to be filled with outputs.
226 * @return enum _ecore_status_t
229 ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
231 struct ecore_queue_start_common_params *p_params,
233 dma_addr_t bd_chain_phys_addr,
234 dma_addr_t cqe_pbl_addr,
236 struct ecore_rxq_start_ret_params *p_ret_params);
239 * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
242 * @param p_rxq Handler of queue to close
243 * @param eq_completion_only If True completion will be on
244 * EQe, if False completion will be
245 * on EQe if p_hwfn opaque
246 * different from the RXQ opaque
248 * @param cqe_completion If True completion will be
250 * @return enum _ecore_status_t
253 ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
255 bool eq_completion_only,
256 bool cqe_completion);
259 * @brief - TX Queue Start Ramrod
261 * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
262 * the VPort is not currently initialized.
267 * @param tc traffic class to use with this L2 txq
268 * @param pbl_addr address of the pbl array
269 * @param pbl_size number of entries in pbl
 * @param p_ret_params Pointer to fill the return parameters in.
272 * @return enum _ecore_status_t
275 ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
277 struct ecore_queue_start_common_params *p_params,
281 struct ecore_txq_start_ret_params *p_ret_params);
284 * @brief ecore_eth_tx_queue_stop - closes a Tx queue
287 * @param p_txq - handle to Tx queue needed to be closed
289 * @return enum _ecore_status_t
291 enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
294 enum ecore_tpa_mode {
301 struct ecore_sp_vport_start_params {
302 enum ecore_tpa_mode tpa_mode;
303 bool remove_inner_vlan; /* Inner VLAN removal is enabled */
304 bool tx_switching; /* Vport supports tx-switching */
305 bool handle_ptp_pkts; /* Handle PTP packets */
306 bool only_untagged; /* Untagged pkt control */
307 bool drop_ttl0; /* Drop packets with TTL = 0 */
308 u8 max_buffers_per_cqe;
311 u8 vport_id; /* VPORT ID */
312 u16 mtu; /* VPORT MTU */
313 bool zero_placement_offset;
317 /* Strict behavior on transmission errors */
318 bool b_err_illegal_vlan_mode;
319 bool b_err_illegal_inband_mode;
320 bool b_err_vlan_insert_with_inband;
321 bool b_err_small_pkt;
323 bool b_err_anti_spoof;
324 bool b_err_ctrl_frame;
328 * @brief ecore_sp_vport_start -
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
 * of the VPort is not enabled.
334 * @param p_params VPORT start params
336 * @return enum _ecore_status_t
339 ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
340 struct ecore_sp_vport_start_params *p_params);
342 struct ecore_sp_vport_update_params {
345 u8 update_vport_active_rx_flg;
346 u8 vport_active_rx_flg;
347 u8 update_vport_active_tx_flg;
348 u8 vport_active_tx_flg;
349 u8 update_inner_vlan_removal_flg;
350 u8 inner_vlan_removal_flg;
351 u8 silent_vlan_removal_flg;
352 u8 update_default_vlan_enable_flg;
353 u8 default_vlan_enable_flg;
354 u8 update_default_vlan_flg;
356 u8 update_tx_switching_flg;
358 u8 update_approx_mcast_flg;
359 u8 update_anti_spoofing_en_flg;
361 u8 update_accept_any_vlan_flg;
363 unsigned long bins[8];
364 struct ecore_rss_params *rss_params;
365 struct ecore_filter_accept_flags accept_flags;
366 struct ecore_sge_tpa_params *sge_tpa_params;
370 * @brief ecore_sp_vport_update -
372 * This ramrod updates the parameters of the VPort. Every field can be updated
373 * independently, according to flags.
375 * This ramrod is also used to set the VPort state to active after creation.
376 * An Assert is generated if the VPort does not contain an RX queue.
381 * @return enum _ecore_status_t
384 ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
385 struct ecore_sp_vport_update_params *p_params,
386 enum spq_mode comp_mode,
387 struct ecore_spq_comp_cb *p_comp_data);
389 * @brief ecore_sp_vport_stop -
391 * This ramrod closes a VPort after all its RX and TX queues are terminated.
392 * An Assert is generated if any queues are left open.
396 * @param vport_id VPort ID
398 * @return enum _ecore_status_t
400 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
405 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
407 struct ecore_filter_ucast *p_filter_cmd,
408 enum spq_mode comp_mode,
409 struct ecore_spq_comp_cb *p_comp_data);
412 * @brief ecore_sp_rx_eth_queues_update -
414 * This ramrod updates an RX queue. It is used for setting the active state
415 * of the queue and updating the TPA and SGE parameters.
417 * @note Final phase API.
420 * @param pp_rxq_handlers An array of queue handlers to be updated.
421 * @param num_rxqs number of queues to update.
422 * @param complete_cqe_flg Post completion to the CQE Ring if set
423 * @param complete_event_flg Post completion to the Event Ring if set
427 * @return enum _ecore_status_t
431 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
432 void **pp_rxq_handlers,
435 u8 complete_event_flg,
436 enum spq_mode comp_mode,
437 struct ecore_spq_comp_cb *p_comp_data);
439 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
440 struct ecore_ptt *p_ptt,
441 struct ecore_eth_stats *stats,
442 u16 statistics_bin, bool b_get_port_stats);
/**
 * @brief ecore_get_vport_stats - device-wide statistics collection.
 *
 * Device-level wrapper; aggregates statistics across the device's
 * hw-functions into @stats.
 *
 * @param p_dev
 * @param stats Output; filled with the collected statistics.
 */
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats);
/**
 * @brief ecore_reset_vport_stats - reset the device's statistics baseline.
 *
 * @param p_dev
 */
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
450 *@brief ecore_arfs_mode_configure -
 * Enable or disable RFS mode. It must accept at least one of TCP or UDP true
 * and at least one of IPv4 or IPv6 true to enable RFS mode.
457 *@param p_cfg_params arfs mode configuration parameters.
/**
 * @brief ecore_arfs_mode_configure - enable/disable aRFS (accelerated RFS).
 *
 * @param p_hwfn
 * @param p_ptt         PTT window for the HW access.
 * @param p_cfg_params  aRFS mode configuration parameters.
 */
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params);