/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * File : ecore_init_fw_funcs.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
	{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
	{ 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};

static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
	{ 240, 240, 112, 0, 0, 0, 0, 96}  /* region 1 offsets */
};
/* General constants */
#define QM_PQ_MEM_4KB(pq_size) \
	(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff

#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
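
/* Worked example of the increment above: a WFQ weight of 10 yields an
 * increment of 10 * 0x9000 = 368640 credits per period. Since the maximum
 * increment below is 43750000, the largest usable weight is
 * 43750000 / 36864 = 1186.
 */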
/* 0.7 * upper bound (62500000) */
#define QM_WFQ_MAX_INC_VAL 43750000

/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16
/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_RL_UPPER_BOUND 62500000

/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* 0.7 * upper bound (62500000) */
#define QM_RL_MAX_INC_VAL 43750000
/* RL increment value - rate is specified in mbps. The factor of 1.01 was
 * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
 * although the credit increment value was the correct one and FW calculated
 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
 * this point.
 */
#define QM_RL_INC_VAL(rate) \
	OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)
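
/* For example, a 25Gbps port (rate = 25000 mbps) gives an increment of
 * (25000 * 5 * 101) / (8 * 100) = 12625000 / 800 = 15781 (integer division
 * of 15781.25), i.e. the 1.01 factor is folded into the 101/100 ratio.
 */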
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 ext_voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		    PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 ext_voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		    PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
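
/* E.g. for the pure LB allocation of 150 command queue lines,
 * QM_VOQ_LINE_CRD yields (150 - 4) * 2 = 292 line credits, with the sign bit
 * of the credit register OR'ed in.
 */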
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)

#define QM_INIT_TX_PQ_MAP(map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr) \
	OSAL_MEMSET(&map, 0, sizeof(map)); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
	SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
	STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map))
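
/* Note that QM_INIT_TX_PQ_MAP expands to a sequence of statements rather
 * than a single expression, and it implicitly uses a p_hwfn variable from
 * the caller's scope; it must therefore only be used inside a braced block
 * (as in ecore_tx_pq_map_rt_init below) - using it as the unbraced body of
 * an if/else would silently drop all but the first statement.
 */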
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
	 ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
	XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4
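
/* The PQ info word thus packs vp in bits [11:0], pf in [15:12], tc in
 * [19:16], port in [21:20], rl_valid in bit [22] and rl in [31:24]. For
 * example, PQ_INFO_ELEMENT(5, 2, 3, 1, 1, 7) evaluates to 0x07532005.
 */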
/******************** INTERNAL IMPLEMENTATION *********************/

/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
			    u8 port_id,
			    u8 tc,
			    u8 max_phys_tcs_per_port)
{
	if (tc == PURE_LB_TC)
		return NUM_OF_PHYS_TCS * (ECORE_IS_E5(p_hwfn->p_dev) ? MAX_NUM_PORTS_E5 : MAX_NUM_PORTS_BB) + port_id;

	return port_id * (ECORE_IS_E5(p_hwfn->p_dev) ? NUM_OF_PHYS_TCS : max_phys_tcs_per_port) + tc;
}
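
/* To make the mapping concrete (a sketch; MAX_NUM_PORTS_BB and
 * NUM_OF_PHYS_TCS are chip constants from the HSI headers): on a non-E5
 * device with 4 physical TCs per port, port 1 / TC 2 maps to external VOQ
 * 1 * 4 + 2 = 6, while the pure LB TC of each port maps past all physical
 * VOQs, to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id.
 */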
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
			       bool pf_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
	if (pf_rl_en) {
		u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;

		/* Enable RLs for all VOQs */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
		if (num_ext_voqs >= 32)
			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, (u32)(voq_bit_mask >> 32));
#endif

		/* Write RL period */
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_RL_UPPER_BOUND);
	}
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn,
				bool pf_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (pf_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}
/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
				  bool vport_rl_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0);
	if (vport_rl_en) {
		/* Write RL period (use timer 0 only) */
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

		/* Set credit threshold for QM bypass flow */
		if (QM_BYPASS_EN)
			STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_RL_UPPER_BOUND);
	}
}
/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
				   bool vport_wfq_en)
{
	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0);

	/* Set credit threshold for QM bypass flow */
	if (vport_wfq_en && QM_BYPASS_EN)
		STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ.
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
					 u8 ext_voq,
					 u16 cmdq_lines)
{
	u32 qm_line_crd;

	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines);
	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u8 tc, ext_voq, port_id, num_tcs_in_port;
	u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;

	/* Clear PBF lines of all VOQs */
	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		u16 phys_lines, phys_lines_per_tc;

		if (!port_params[port_id].active)
			continue;

		/* Find number of command queue lines to divide between the
		 * active physical TCs. In E5, 1/8 of the lines are reserved,
		 * and the lines for the pure LB TC are subtracted.
		 */
		phys_lines = port_params[port_id].num_pbf_cmd_lines;
		if (ECORE_IS_E5(p_hwfn->p_dev))
			phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
		phys_lines -= PBF_CMDQ_PURE_LB_LINES;

		/* Find #lines per active physical TC */
		num_tcs_in_port = 0;
		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				num_tcs_in_port++;
		phys_lines_per_tc = phys_lines / num_tcs_in_port;

		/* Init registers per active TC */
		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
			ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc);
		}

		/* Init registers for pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
		ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, PBF_CMDQ_PURE_LB_LINES);
	}
}
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 *
 * B - BTB blocks for this port
 * C - Number of physical TCs for this port
 *
 * a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *    headroom.
 * b. B = B - 38 (remainder after global headroom allocation).
 * c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 * e. B/C blocks are allocated for each physical TC.
 *
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
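
/* Tracing the code below with illustrative numbers (chosen here only to show
 * the arithmetic): a port with 1038 BTB blocks and 4 active physical TCs
 * first sets aside the 38-block headroom, leaving 1000 usable blocks; the
 * pure LB VOQ then gets OSAL_MAX_T(u32, 38, ((1000 * 10) / (4 * 10 + 7)) / 10)
 * = MAX(38, 21) = 38 blocks (the factor of 10 avoids floating point), and
 * each physical TC gets (1000 - 38) / 4 = 240 guaranteed blocks.
 */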
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
				     u8 max_ports_per_engine,
				     u8 max_phys_tcs_per_port,
				     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 usable_blocks, pure_lb_blocks, phys_blocks;
	u8 tc, ext_voq, port_id, num_tcs_in_port;

	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
		if (!port_params[port_id].active)
			continue;

		/* Subtract headroom blocks */
		usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS;

		/* Find blocks per physical TC. Use factor to avoid floating
		 * point arithmetic.
		 */
		num_tcs_in_port = 0;
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
				num_tcs_in_port++;

		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
		pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS, pure_lb_blocks / BTB_PURE_LB_FACTOR);
		phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port;

		/* Init physical TCs */
		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
			if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
				ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
				STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), phys_blocks);
			}
		}

		/* Init pure LB TC */
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
	}
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    u8 port_id,
				    u8 pf_id,
				    u8 max_phys_tcs_per_port,
				    u32 num_pf_cids,
				    u32 num_vf_cids,
				    u16 start_pq,
				    u16 num_pf_pqs,
				    u16 num_vf_pqs,
				    u8 start_vport,
				    u32 base_mem_addr_4kb,
				    struct init_qm_pq_params *pq_params,
				    struct init_qm_vport_params *vport_params)
{
	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
	u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

	num_pqs = num_pf_pqs + num_vf_pqs;

	first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
	last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

	pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
	vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Set mapping from PQ group to PF */
	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(num_pf_cids));
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(num_vf_cids));

	/* Go over all Tx PQs */
	for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
		u8 ext_voq, vport_id_in_pf;
		bool is_vf_pq, rl_valid;
		u16 first_tx_pq_id;

		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		is_vf_pq = (i >= num_pf_pqs);
		rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < max_qm_global_rls;

		/* Update first Tx PQ of VPORT/TC */
		vport_id_in_pf = pq_params[i].vport_id - start_vport;
		first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
			u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id << (ECORE_IS_E5(p_hwfn->p_dev) ? QM_WFQ_VP_PQ_PF_E5_SHIFT : QM_WFQ_VP_PQ_PF_E4_SHIFT));

			/* Create new VP PQ */
			vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
			first_tx_pq_id = pq_id;

			/* Map VP PQ to VOQ and PF */
			STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, map_val);
		}

		/* Check RL ID */
		if (pq_params[i].rl_valid && pq_params[i].vport_id >= max_qm_global_rls)
			DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");

		/* Prepare PQ map entry */
		if (ECORE_IS_E5(p_hwfn->p_dev)) {
			struct qm_rf_pq_map_e5 tx_pq_map;
			QM_INIT_TX_PQ_MAP(tx_pq_map, E5, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
		} else {
			struct qm_rf_pq_map_e4 tx_pq_map;
			QM_INIT_TX_PQ_MAP(tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
		}

		/* Set base address */
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);

		/* Write PQ info to RAM */
		if (WRITE_PQ_INFO_TO_RAM != 0) {
			u32 pq_info = 0;

			pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, pq_params[i].tc_id, port_id, rl_valid ? 1 : 0, rl_valid ? pq_params[i].vport_id : 0);
			ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info);
		}

		/* If VF PQ, add indication to PQ VF mask */
		if (is_vf_pq) {
			tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
			mem_addr_4kb += vport_pq_mem_4kb;
		} else {
			mem_addr_4kb += pq_mem_4kb;
		}
	}

	/* Store Tx PQ VF mask to size select register */
	for (i = 0; i < num_tx_pq_vf_masks; i++)
		if (tx_pq_vf_mask[i])
			STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
				       u8 pf_id,
				       u32 num_pf_cids,
				       u32 num_tids,
				       u32 base_mem_addr_4kb)
{
	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
	u16 i, pq_id, pq_group;

	/* A single other PQ group is used in each PF, where PQ group i is used
	 * in PF i.
	 */
	pq_group = pf_id;
	pq_size = num_pf_cids + num_tids;
	pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
	mem_addr_4kb = base_mem_addr_4kb;

	/* Map PQ group to PF */
	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

	/* Set PQ sizes */
	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));

	/* Set base address */
	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
		STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 port_id,
				u8 pf_id,
				u16 pf_wfq,
				u8 max_phys_tcs_per_port,
				u16 num_tx_pqs,
				struct init_qm_pq_params *pq_params)
{
	u32 inc_val, crd_reg_offset;
	u8 ext_voq;
	u16 i;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	for (i = 0; i < num_tx_pqs; i++) {
		ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
		crd_reg_offset = ECORE_IS_E5(p_hwfn->p_dev) ?
			(ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
			(pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
		OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
	}

	return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
			       u8 pf_id,
			       u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
		return -1;
	}

	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

	return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
				u8 num_vports,
				struct init_qm_vport_params *vport_params)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc, i;

	/* Go over all PF VPORTs */
	for (i = 0; i < num_vports; i++) {
		if (!vport_params[i].vport_wfq)
			continue;

		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
		if (inc_val > QM_WFQ_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
			return -1;
		}

		/* Each VPORT can have several VPORT PQ IDs for various TCs */
		for (tc = 0; tc < NUM_OF_TCS; tc++) {
			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
			if (vport_pq_id != QM_INVALID_PQ_ID) {
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
				STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val);
			}
		}
	}

	return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
				  u8 start_vport,
				  u8 num_vports,
				  struct init_qm_vport_params *vport_params)
{
	u8 i, vport_id;
	u32 inc_val;

	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	/* Go over all PF VPORTs */
	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
		if (inc_val > QM_RL_MAX_INC_VAL) {
			DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
			return -1;
		}

		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val);
	}

	return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 reg_val, i;

	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) {
		OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
		reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
	}

	/* Check if timeout while waiting for SDM command ready */
	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Timeout when waiting for QM SDM command ready signal\n");
		return false;
	}

	return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u32 cmd_addr,
			      u32 cmd_data_lsb,
			      u32 cmd_data_msb)
{
	if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
		return false;

	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
	ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

	return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}


/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
			 u32 num_vf_cids,
			 u32 num_tids,
			 u16 num_pf_pqs,
			 u16 num_vf_pqs)
{
	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
			    u8 max_ports_per_engine,
			    u8 max_phys_tcs_per_port,
			    bool pf_rl_en,
			    bool pf_wfq_en,
			    bool vport_rl_en,
			    bool vport_wfq_en,
			    struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
	u32 mask;

	/* Init AFullOprtnstcCrdMask */
	mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
	       (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
	       (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
	       (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
	       (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
	       (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
	       (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
	       (QM_OPPOR_PQ_EMPTY_DEF << QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

	/* Enable/disable PF RL */
	ecore_enable_pf_rl(p_hwfn, pf_rl_en);

	/* Enable/disable PF WFQ */
	ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

	/* Enable/disable VPORT RL */
	ecore_enable_vport_rl(p_hwfn, vport_rl_en);

	/* Enable/disable VPORT WFQ */
	ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

	/* Init PBF CMDQ line credit */
	ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

	/* Init BTB blocks in PBF */
	ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

	return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 port_id,
			u8 pf_id,
			u8 max_phys_tcs_per_port,
			u32 num_pf_cids,
			u32 num_vf_cids,
			u32 num_tids,
			u16 start_pq,
			u16 num_pf_pqs,
			u16 num_vf_pqs,
			u8 start_vport,
			u8 num_vports,
			u16 pf_wfq,
			u32 pf_rl,
			struct init_qm_pq_params *pq_params,
			struct init_qm_vport_params *vport_params)
{
	u32 other_mem_size_4kb;
	u8 tc, i;

	other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;

	/* Clear first Tx PQ ID array for each VPORT */
	for (i = 0; i < num_vports; i++)
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

	/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
	ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
#endif

	/* Map Tx PQs */
	ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
				start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);

	/* Init PF WFQ */
	if (pf_wfq)
		if (ecore_pf_wfq_rt_init(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, num_pf_pqs + num_vf_pqs, pq_params))
			return -1;

	/* Init PF RL */
	if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
		return -1;

	/* Set VPORT WFQ */
	if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
		return -1;

	/* Set VPORT RL */
	if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, vport_params))
		return -1;

	return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
		      struct ecore_ptt *p_ptt,
		      u8 pf_id,
		      u16 pf_wfq)
{
	u32 inc_val;

	inc_val = QM_WFQ_INC_VAL(pf_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

	return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
		     struct ecore_ptt *p_ptt,
		     u8 pf_id,
		     u32 pf_rl)
{
	u32 inc_val;

	inc_val = QM_RL_INC_VAL(pf_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

	return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 u16 first_tx_pq_id[NUM_OF_TCS],
			 u16 vport_wfq)
{
	u16 vport_pq_id;
	u32 inc_val;
	u8 tc;

	inc_val = QM_WFQ_INC_VAL(vport_wfq);
	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
		return -1;
	}

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		vport_pq_id = first_tx_pq_id[tc];
		if (vport_pq_id != QM_INVALID_PQ_ID) {
			ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
		}
	}

	return 0;
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			u8 vport_id,
			u32 vport_rl)
{
	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

	if (vport_id >= max_qm_global_rls) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
		return -1;
	}

	inc_val = QM_RL_INC_VAL(vport_rl);
	if (inc_val > QM_RL_MAX_INC_VAL) {
		DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
		return -1;
	}

	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
	ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

	return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool is_release_cmd,
			    bool is_tx_pq,
			    u16 start_pq,
			    u16 num_pqs)
{
	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = {0};
	u32 pq_mask = 0, last_pq, pq_id;

	last_pq = start_pq + num_pqs - 1;

	/* Set command's PQ type */
	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

	/* Go over requested PQs */
	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		/* Set PQ bit in mask (stop command only) */
		if (!is_release_cmd)
			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

		/* If last PQ or end of PQ mask, write command */
		if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) {
			QM_CMD_SET_FIELD(cmd_arr, (u32)QM_STOP_CMD, PAUSE_MASK, pq_mask);
			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH);
			if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], cmd_arr[1]))
				return false;

			/* Reset PQ mask */
			pq_mask = 0;
		}
	}

	return true;
}
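
/* A minimal usage sketch (illustrative only; start_pq and num_pqs are
 * placeholders): a caller pauses a range of Tx PQs before teardown and
 * releases them afterwards, e.g.
 *
 *	if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
 *				    start_pq, num_pqs))
 *		return ECORE_TIMEOUT;
 *	... drain and tear down the queues ...
 *	ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
 */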
#ifndef UNUSED_HSI_FUNC

/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight,mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
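
/* E.g. a 10000 mbps (10Gbps) rate gives an increment of 10000 * 1 / 8 = 1250
 * bytes per 1us RL period.
 */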
#define NIG_RL_MAX_VAL(inc_val,mtu) (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))

/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req* req,
			bool is_lb)
{
	u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
	u32 tc_bound_base_addr, tc_bound_addr_diff;
	u8 sp_tc_map = 0, wfq_tc_map = 0;
	u8 tc, num_tc, tc_client_offset;

	num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
	tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
	min_weight = 0xffffffff;
	tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 - NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
				      NIG_REG_TX_ARB_CREDIT_WEIGHT_1 - NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
	tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
	tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
				     NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < num_tc; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : NIG_REG_TX_ARB_CLIENT_IS_STRICT, (sp_tc_map << tc_client_offset));

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ, (wfq_tc_map << tc_client_offset));

	/* Write WFQ weights */
	for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr + tc_weight_addr_diff * tc_client_offset, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr + tc_bound_addr_diff * tc_client_offset, NIG_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}
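
/* To make the weight translation concrete (example values only): with two
 * WFQ TCs of weights 3 and 1, min_weight is 1, so the TCs are programmed
 * with byte weights of 3 * 1600 = 4800 and 1600 respectively, and with an
 * MTU of 9000 each upper bound is 2 * MAX(byte_weight, 9000).
 */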
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  struct init_nig_lb_rl_req* req)
{
	u32 ctrl, inc_val, reg_offset;
	u8 tc;

	/* Disable global MAC+LB RL */
	ctrl = NIG_RL_BASE_TYPE << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global MAC+LB RL */
	if (req->lb_mac_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
	}

	/* Disable global LB-only RL */
	ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
	ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

	/* Configure and enable global LB-only RL */
	if (req->lb_rate) {
		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->lb_rate);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
	}

	/* Per-TC RLs */
	for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {
		/* Disable TC RL */
		ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

		/* Configure and enable TC RL */
		if (!req->tc_rate[tc])
			continue;

		/* Configure */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + reg_offset, NIG_RL_PERIOD_CLK_25M);
		inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + reg_offset, inc_val);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

		/* Enable */
		ctrl |= 1 << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
	}
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct init_nig_pri_tc_map_req* req)
{
	u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
	u32 pri_tc_mask = 0;
	u8 pri, tc;

	for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
		if (!req->pri[pri].valid)
			continue;

		pri_tc_mask |= (req->pri[pri].tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
		tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
	}

	/* Write priority -> TC mask */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

	/* Write TC -> priority mask */
	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
		ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, tc_pri_mask[tc]);
		ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, tc_pri_mask[tc]);
	}
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight,mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_ets_req* req)
{
	u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
	u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

	tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
	tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 - PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];

		/* Update SP map */
		if (tc_req->use_sp)
			sp_tc_map |= (1 << tc);

		if (!tc_req->use_wfq)
			continue;

		/* Update WFQ map */
		wfq_tc_map |= (1 << tc);

		/* Find minimal weight */
		if (tc_req->weight < min_weight)
			min_weight = tc_req->weight;
	}

	/* Write SP map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

	/* Write WFQ map */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, wfq_tc_map);

	/* Write WFQ weights */
	for (tc = 0; tc < NUM_OF_TCS; tc++) {
		struct init_ets_tc_req *tc_req = &req->tc_req[tc];
		u32 byte_weight;

		if (!tc_req->use_wfq)
			continue;

		/* Translate weight to bytes */
		byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

		/* Write WFQ weight */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc * tc_weight_addr_diff, byte_weight);

		/* Write WFQ upper bound */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight, req->mtu));
	}
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt,
			struct init_brb_ram_req* req)
{
	u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
	u32 active_port_blocks, reg_offset = 0;
	u8 port, active_ports = 0;

	tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
	min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
	total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 : BRB_TOTAL_RAM_BLOCKS_BB;

	/* Find number of active ports */
	for (port = 0; port < MAX_NUM_PORTS; port++)
		if (req->num_active_tcs[port])
			active_ports++;

	active_port_blocks = (u32)(total_blocks / active_ports);

	for (port = 0; port < req->max_ports_per_engine; port++) {
		u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
		u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
		u32 tc_guaranteed_blocks;
		u8 tc;

		/* Calculate per-port sizes */
		tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
		port_blocks = req->num_active_tcs[port] ? active_port_blocks : 0;
		port_guaranteed_blocks = req->num_active_tcs[port] * tc_guaranteed_blocks;
		port_shared_blocks = port_blocks - port_guaranteed_blocks;
		full_xoff_th = req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
		full_xon_th = full_xoff_th + min_pkt_size_blocks;
		pause_xoff_th = tc_headroom_blocks;
		pause_xon_th = pause_xoff_th + min_pkt_size_blocks;

		/* Init total size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, port_blocks);

		/* Init shared size per port */
		ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, port_shared_blocks);

		for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
			/* Clear init values for non-active TCs */
			if (tc == req->num_active_tcs[port]) {
				tc_guaranteed_blocks = 0;
				full_xoff_th = 0;
				full_xon_th = 0;
				pause_xoff_th = 0;
				pause_xon_th = 0;
			}

			/* Init guaranteed size per TC */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_TC_GUARANTIED_0 + reg_offset, tc_guaranteed_blocks);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset, BRB_HYST_BLOCKS);

			/* Init pause/full thresholds per physical TC - for
			 * loopback traffic.
			 */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);

			/* Init pause/full thresholds per physical TC - for
			 * main traffic.
			 */
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
			ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
		}
	}
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* In MF, should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update PRS register */
	STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);

	/* Update NIG register */
	STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);

	/* Update PBF register */
	STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}

/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
	/* Update DORQ register */
	STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#endif /* UNUSED_HSI_FUNC */
#define SET_TUNNEL_TYPE_ENABLE_BIT(var,offset,enable) \
	var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    bool vxlan_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) /* TODO: handle E5 init */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT, vxlan_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ register */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
			  struct ecore_ptt *p_ptt,
			  bool eth_gre_enable,
			  bool ip_gre_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) /* TODO: handle E5 init */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);

	/* Update NIG register */
	reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u16 dest_port)
{
	/* Update PRS register */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

	/* Update PBF register */
	ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     bool eth_geneve_enable,
			     bool ip_geneve_enable)
{
	u32 reg_val;

	/* Update PRS register */
	reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, eth_geneve_enable);
	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
	if (reg_val) /* TODO: handle E5 init */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);

	/* Update NIG register */
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

	/* EDPM with geneve tunnel not supported in BB */
	if (ECORE_IS_BB_B0(p_hwfn->p_dev))
		return;

	/* Update DORQ registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, eth_geneve_enable ? 1 : 0);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0);
}
#ifndef UNUSED_HSI_FUNC

#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u16 pf_id)
{
	union gft_cam_line_union cam_line;
	struct gft_ram_line ram_line;
	u32 i, *ram_line_ptr;

	ram_line_ptr = (u32*)&ram_line;

	/* Stop using gft logic, disable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);

	/* Clean ram & cam for next rfs/gft session */

	/* Zero camline */
	OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);

	/* Zero ramline */
	OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));

	/* Each iteration write to reg */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));
}
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 rfs_cm_hdr_event_id;

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       u16 pf_id,
			       bool tcp,
			       bool udp,
			       bool ipv4,
			       bool ipv6)
{
	u32 rfs_cm_hdr_event_id, *ram_line_ptr;
	union gft_cam_line_union cam_line;
	struct gft_ram_line ram_line;
	u32 i;

	rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
	ram_line_ptr = (u32*)&ram_line;

	if (!ipv6 && !ipv4)
		DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6\n");
	if (!tcp && !udp)
		DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of - udp or tcp\n");

	/* Set RFS event ID to be awakened in Tstorm by PRS */
	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
	ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);

	/* Configure Registers for RFS mode */

	/* Enable gft search */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);

	/* Do not load context only cid in PRS on match. */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

	/* Cam line is now valid!! */
	cam_line.cam_line_mapped.camline = 0;
	SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_VALID, 1);

	/* Filters are per PF!! */
	SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
	SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

	if (!(tcp && udp)) {
		SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
		if (tcp)
			SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
		else
			SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
	}

	if (!(ipv4 && ipv6)) {
		SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
		if (ipv4)
			SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
		else
			SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
	}

	/* Write characteristics to cam */
	ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);
	cam_line.cam_line_mapped.camline = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id);

	/* Write line to RAM - compare to filter 4 tuple */
	ram_line.lo = 0;
	ram_line.hi = 0;
	SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
	SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
	SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
	SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
	SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
	SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);

	/* Each iteration write to reg */
	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));

	/* Set default profile so that no filter match will happen */
	ram_line.lo = 0xffffffff;
	ram_line.hi = 0x3ff;

	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*PRS_GFT_CAM_LINES_NO_MATCH + i*REG_SIZE, *(ram_line_ptr + i));
}
#endif /* UNUSED_HSI_FUNC */
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 mode, bool runtime_init)
{
	u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
	u32 msdm_vf_offset_mask;

	if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
		msdm_vf_size_log += 1;
	else if (mode == VF_ZONE_SIZE_MODE_QUAD)
		msdm_vf_size_log += 2;

	msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

	if (runtime_init) {
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
		STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
	} else {
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
	}
}
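
/* For instance (assuming nothing about the value of
 * MSTORM_VF_ZONE_DEFAULT_SIZE_LOG beyond what the code above uses):
 * VF_ZONE_SIZE_MODE_DOUBLE bumps the size log by 1, which doubles the
 * per-VF zone and widens the offset mask accordingly - a size log of 7
 * would give a mask of (1 << 7) - 1 = 0x7f.
 */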
/* Get mstorm statistics offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, u16 stat_cnt_id, u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);

	if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && (stat_cnt_id > MAX_NUM_PFS)) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
	}

	return offset;
}
/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8 vf_queue_id, u16 vf_zone_size_mode)
{
	u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

	if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
		if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
			offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
		else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
			offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
	}

	return offset;
}
#define CRC8_INIT_VALUE 0xFF
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

	static u8 crc8_table_valid; /* automatically initialized to 0 */
	u8 crc, validation_byte = 0;
	u32 validation_string = 0;
	u32 data_to_crc;

	if (crc8_table_valid == 0) {
		OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
		crc8_table_valid = 1;
	}

	/* The CRC is calculated on the String-to-compress:
	 * [31:8] = {CID[31:20],CID[11:0]}
	 * [7:4]  = Region
	 * [3:0]  = Type
	 */
	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
		validation_string |= ((region & 0xF) << 4);

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
		validation_string |= (conn_type & 0xF);

	/* Convert to big-endian and calculate CRC8 */
	data_to_crc = OSAL_BE32_TO_CPU(validation_string);

	crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);

	/* The validation byte [7:0] is composed:
	 * for type A validation
	 * [7]   = active configuration bit
	 * [6:0] = crc[6:0]
	 *
	 * for type B validation
	 * [7]   = active configuration bit
	 * [6:3] = connection_type[3:0]
	 * [2:0] = crc[2:0]
	 */
	validation_byte |= ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
	else
		validation_byte |= crc & 0x7F;

	return validation_byte;
}
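
/* Worked example of the string assembly above (assuming the CID, region and
 * type bits are all enabled in validation_cfg): for cid = 0x12345678,
 * region = 3 and conn_type = 1, validation_string = 0x12300000 |
 * (0x678 << 8) | (3 << 4) | 1 = 0x12367831, which is then byte-swapped and
 * fed to the CRC8.
 */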
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
				       u8 ctx_type, u32 cid)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

	p_ctx = (u8* const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
	*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
	*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
				    u32 tid)
{
	u8 *p_ctx, *region1_val_ptr;

	p_ctx = (u8* const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
	u8 x_val, t_val, u_val;

	p_ctx = (u8* const)p_ctx_mem;
	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

	x_val = *x_val_ptr;
	t_val = *t_val_ptr;
	u_val = *u_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*x_val_ptr = x_val;
	*t_val_ptr = t_val;
	*u_val_ptr = u_val;
}
/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
	u8 *p_ctx, *region1_val_ptr;
	u8 region1_val;

	p_ctx = (u8* const)p_ctx_mem;
	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

	region1_val = *region1_val_ptr;

	OSAL_MEMSET(p_ctx, 0, ctx_size);

	*region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 ctx_validation;

	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
	ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}