/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_init_fw_funcs.c
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#define CDU_VALIDATION_DEFAULT_CFG 61

static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
    { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
    { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
    { 608, 544, 496, 512, 576, 592, 624, 560}  /* region 5 offsets */
};

static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
    { 240, 240, 112,   0,   0,   0,   0,  96}  /* region 1 offsets */
};
58 /* General constants */
59 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
60 #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
61 #define QM_INVALID_PQ_ID 0xffff
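
/* Illustrative examples (not part of the original source): with 512 CIDs,
 * QM_PQ_SIZE_256B(512) = DIV_ROUND_UP(512, 256) - 1 = 1, i.e. the size field
 * is encoded as (number of 256B units - 1). QM_PQ_MEM_4KB rounds
 * (pq_size + 1) PQ elements of QM_PQ_ELEMENT_SIZE bytes up to whole 4KB pages.
 */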
/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

/* WFQ constants: */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000
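
/* Worked arithmetic (illustrative, not in the original source): a 1ms burst
 * at 50Gbps is 50e9 / 8 * 1e-3 = 6,250,000 bytes; ten such bursts give the
 * 62,500,000-byte upper bound used here and for QM_RL_UPPER_BOUND below.
 */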
/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)

/* 0.7 * upper bound (62500000) */
#define QM_WFQ_MAX_INC_VAL 43750000
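
/* Illustrative example (not in the original source): QM_WFQ_INC_VAL(1) =
 * 0x9000 = 36864 credits per weight unit, so the largest weight that stays
 * under QM_WFQ_MAX_INC_VAL is 43750000 / 36864 ~= 1186.
 */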
89 /* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
90 #define QM_RL_UPPER_BOUND 62500000
93 #define QM_RL_PERIOD 5
95 /* Period in 25MHz cycles */
96 #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
98 /* 0.7 * upper bound (62500000) */
99 #define QM_RL_MAX_INC_VAL 43750000
101 /* RL increment value - rate is specified in mbps. the factor of 1.01 was
102 * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
103 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
104 * although the credit increment value was the correct one and FW calculated
105 * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
108 #define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * QM_RL_PERIOD * 101) / (8 * 100)), 1)
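
/* Worked example (illustrative, not in the original source): for a 25000
 * mbps rate, QM_RL_INC_VAL(25000) = 25000 * 5 * 101 / 800 = 15781 bytes of
 * credit per 5us RL period (the /8 converts bits to bytes, the 101/100
 * applies the 1.01 factor described above).
 */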
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants: */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
    (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
     voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
    (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
     voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
    ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
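
/* Illustrative example (not in the original source): with the default
 * PBF_CMDQ_PURE_LB_LINES of 150, QM_VOQ_LINE_CRD(150) = (150 - 4) * 2 = 292
 * line credits, OR'd with the sign bit the QM credit registers expect.
 */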
/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
    SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
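
/* Illustrative expansion (not in the original source):
 * QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, 1) token-pastes into
 * SET_FIELD(cmd_arr[QM_STOP_CMD_PQ_TYPE_OFFSET], QM_STOP_CMD_PQ_TYPE, 1),
 * i.e. it sets bit 24 (QM_STOP_CMD_PQ_TYPE_SHIFT) of cmd_arr[1].
 */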
/* VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
    ((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
    ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
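
/* Illustrative mapping (not in the original source): with 4 physical TCs
 * per port, port 1 / TC 2 maps to PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = VOQ 6,
 * while any TC >= LB_TC on port 1 maps to LB_VOQ(1) = MAX_PHYS_VOQS + 1.
 */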
/******************** INTERNAL IMPLEMENTATION *********************/

/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
                               bool pf_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
    if (pf_rl_en) {

        /* Enable RLs for all VOQs */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (1 << MAX_NUM_VOQS) - 1);

        /* Write RL period */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

        /* Set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_RL_UPPER_BOUND);
    }
}
/* Prepare PF WFQ enable/disable runtime init values */
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn,
                                bool pf_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);

    /* Set credit threshold for QM bypass flow */
    if (pf_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
                                  bool vport_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0);
    if (vport_rl_en) {

        /* Write RL period (use timer 0 only) */
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);

        /* Set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_RL_UPPER_BOUND);
    }
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
                                   bool vport_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0);

    /* Set credit threshold for QM bypass flow */
    if (vport_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
}
/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
                                         u8 voq,
                                         u16 cmdq_lines)
{
    u32 qm_line_crd;

    qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);

    OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), (u32)cmdq_lines);
    STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
    STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, qm_line_crd);
}

/* Prepare runtime init values to allocate PBF command queue lines. */
static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
                                     u8 max_ports_per_engine,
                                     u8 max_phys_tcs_per_port,
                                     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u8 tc, voq, port_id, num_tcs_in_port;

    /* Clear PBF lines for all VOQs */
    for (voq = 0; voq < MAX_NUM_VOQS; voq++)
        STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);

    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        u16 phys_lines, phys_lines_per_tc;

        if (!port_params[port_id].active)
            continue;

        /* Find #lines to divide between the active physical TCs */
        phys_lines = port_params[port_id].num_pbf_cmd_lines - PBF_CMDQ_PURE_LB_LINES;

        /* Find #lines per active physical TC */
        num_tcs_in_port = 0;
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
                num_tcs_in_port++;
        phys_lines_per_tc = phys_lines / num_tcs_in_port;

        /* Init registers per active TC */
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
                voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
                ecore_cmdq_lines_voq_rt_init(p_hwfn, voq, phys_lines_per_tc);
            }
        }

        /* Init registers for pure LB TC */
        ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), PBF_CMDQ_PURE_LB_LINES);
    }
}
/* Prepare runtime init values to allocate guaranteed BTB blocks for the
 * specified port. The guaranteed BTB space is divided between the TCs as
 * follows (shared space is currently not used):
 * 1. Parameters:
 *    B - BTB blocks for this port
 *    C - Number of physical TCs for this port
 * 2. Calculation:
 *    a. 38 blocks (9700B jumbo frame) are allocated for global per-port
 *       headroom.
 *    b. B = B - 38 (remainder after global headroom allocation).
 *    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
 *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
 *    e. B/C blocks are allocated for each physical TC.
 * Assumptions:
 * - MTU is up to 9700 bytes (38 blocks)
 * - All TCs are considered symmetrical (same rate and packet size)
 * - No optimization for lossy TC (all are considered lossless). Shared space
 *   is not enabled and allocated for each TC.
 */
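
/* Worked example (illustrative, matching the code below): for a port with
 * B = 1000 blocks and C = 4 active TCs, usable = 1000 - 38 = 962;
 * pure LB = MAX(38, (962 * 10 / (4 * 10 + 7)) / 10) = MAX(38, 20) = 38;
 * each physical TC then gets (962 - 38) / 4 = 231 guaranteed blocks.
 */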
static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
                                     u8 max_ports_per_engine,
                                     u8 max_phys_tcs_per_port,
                                     struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u32 usable_blocks, pure_lb_blocks, phys_blocks;
    u8 tc, voq, port_id, num_tcs_in_port;

    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        if (!port_params[port_id].active)
            continue;

        /* Subtract headroom blocks */
        usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS;

        /* Find blocks per physical TC. Use factor to avoid floating
         * point arithmetic.
         */
        num_tcs_in_port = 0;
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
                num_tcs_in_port++;

        pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
                         (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
        pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
                                    pure_lb_blocks / BTB_PURE_LB_FACTOR);
        phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port;

        /* Init physical TCs */
        for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
            if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
                voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
                STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), phys_blocks);
            }
        }

        /* Init pure LB TC */
        STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)), pure_lb_blocks);
    }
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    u8 port_id,
                                    u8 pf_id,
                                    u8 max_phys_tcs_per_port,
                                    bool is_first_pf,
                                    u32 num_pf_cids,
                                    u32 num_vf_cids,
                                    u16 start_pq,
                                    u16 num_pf_pqs,
                                    u16 num_vf_pqs,
                                    u8 start_vport,
                                    u32 base_mem_addr_4kb,
                                    struct init_qm_pq_params *pq_params,
                                    struct init_qm_vport_params *vport_params)
{
    /* A bit per Tx PQ indicating if the PQ is associated with a VF */
    u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
    u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
    u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
    u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;

    num_pqs = num_pf_pqs + num_vf_pqs;

    first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
    last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;

    pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
    vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
    mem_addr_4kb = base_mem_addr_4kb;

    /* Set mapping from PQ group to PF */
    for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
        STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

    /* Set PQ sizes */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(num_pf_cids));
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(num_vf_cids));

    /* Go over all Tx PQs */
    for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
        u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
        struct qm_rf_pq_map tx_pq_map;
        bool is_vf_pq, rl_valid;
        u8 voq, vport_id_in_pf;
        u16 first_tx_pq_id;

        voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
        is_vf_pq = (i >= num_pf_pqs);
        rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < max_qm_global_rls;

        /* Update first Tx PQ of VPORT/TC */
        vport_id_in_pf = pq_params[i].vport_id - start_vport;
        first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
        if (first_tx_pq_id == QM_INVALID_PQ_ID) {

            /* Create new VP PQ */
            vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
            first_tx_pq_id = pq_id;

            /* Map VP PQ to VOQ and PF */
            STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
                         (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id << QM_WFQ_VP_PQ_PF_SHIFT));
        }

        /* Check RL ID */
        if (pq_params[i].rl_valid && pq_params[i].vport_id >= max_qm_global_rls)
            DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");

        /* Fill PQ map entry */
        OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, rl_valid ? pq_params[i].vport_id : 0);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, pq_params[i].wrr_group);

        /* Write PQ map entry to CAM */
        STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32*)&tx_pq_map));

        /* Set base address */
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);

        /* If VF PQ, add indication to PQ VF mask */
        if (is_vf_pq) {
            tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
            mem_addr_4kb += vport_pq_mem_4kb;
        }
        else {
            mem_addr_4kb += pq_mem_4kb;
        }
    }

    /* Store Tx PQ VF mask to size select register */
    for (i = 0; i < num_tx_pq_vf_masks; i++)
        if (tx_pq_vf_mask[i])
            STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
                                       u8 port_id,
                                       u8 pf_id,
                                       u32 num_pf_cids,
                                       u32 num_tids,
                                       u32 base_mem_addr_4kb)
{
    u32 pq_size, pq_mem_4kb, mem_addr_4kb;
    u16 i, pq_id, pq_group;

    /* A single other PQ group is used in each PF, where PQ group i is used
     * in PF i.
     */
    pq_group = pf_id;
    pq_size = num_pf_cids + num_tids;
    pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
    mem_addr_4kb = base_mem_addr_4kb;

    /* Map PQ group to PF */
    STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));

    /* Set PQ size */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));

    /* Set base address */
    for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);
        mem_addr_4kb += pq_mem_4kb;
    }
}
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
                                u8 port_id,
                                u8 pf_id,
                                u16 pf_wfq,
                                u8 max_phys_tcs_per_port,
                                u16 num_tx_pqs,
                                struct init_qm_pq_params *pq_params)
{
    u32 inc_val, crd_reg_offset;
    u8 voq;
    u16 i;

    crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
                      QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);

    inc_val = QM_WFQ_INC_VAL(pf_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
        return -1;
    }

    for (i = 0; i < num_tx_pqs; i++) {
        voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
        OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
    }

    STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);

    return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
                               u8 pf_id,
                               u32 pf_rl)
{
    u32 inc_val;

    inc_val = QM_RL_INC_VAL(pf_rl);
    if (inc_val > QM_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
        return -1;
    }

    STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);

    return 0;
}
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
                                u8 num_vports,
                                struct init_qm_vport_params *vport_params)
{
    u16 vport_pq_id;
    u32 inc_val;
    u8 tc, i;

    /* Go over all PF VPORTs */
    for (i = 0; i < num_vports; i++) {
        if (!vport_params[i].vport_wfq)
            continue;

        inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
        if (inc_val > QM_WFQ_MAX_INC_VAL) {
            DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
            return -1;
        }

        /* Each VPORT can have several VPORT PQ IDs for various TCs */
        for (tc = 0; tc < NUM_OF_TCS; tc++) {
            vport_pq_id = vport_params[i].first_tx_pq_id[tc];
            if (vport_pq_id != QM_INVALID_PQ_ID) {
                STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val);
            }
        }
    }

    return 0;
}
/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
                                  u8 start_vport,
                                  u8 num_vports,
                                  struct init_qm_vport_params *vport_params)
{
    u8 i, vport_id;
    u32 inc_val;

    if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
        return -1;
    }

    /* Go over all PF VPORTs */
    for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
        inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
        if (inc_val > QM_RL_MAX_INC_VAL) {
            DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
            return -1;
        }

        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val);
    }

    return 0;
}
static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt)
{
    u32 reg_val, i;

    for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) {
        OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
        reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
    }

    /* Check if timeout while waiting for SDM command ready */
    if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
        DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Timeout when waiting for QM SDM command ready signal\n");
        return false;
    }

    return true;
}
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 cmd_addr,
                              u32 cmd_data_lsb,
                              u32 cmd_data_msb)
{
    if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
        return false;

    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
    ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

    return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/

u32 ecore_qm_pf_mem_size(u8 pf_id,
                         u32 num_pf_cids,
                         u32 num_vf_cids,
                         u32 num_tids,
                         u16 num_pf_pqs,
                         u16 num_vf_pqs)
{
    return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
           QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
           QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
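
/* Illustrative sizing (not in the original source; assumes
 * QM_PQ_ELEMENT_SIZE is 4 bytes): with num_pf_cids = 2048,
 * QM_PQ_MEM_4KB(2048) = DIV_ROUND_UP(2049 * 4, 4096) = 3 pages per PF PQ;
 * the total adds the VF PQ pages plus QM_OTHER_PQS_PER_PF (4) "other" PQs
 * sized by num_pf_cids + num_tids.
 */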
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
                            u8 max_ports_per_engine,
                            u8 max_phys_tcs_per_port,
                            bool pf_rl_en,
                            bool pf_wfq_en,
                            bool vport_rl_en,
                            bool vport_wfq_en,
                            struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u32 mask;

    /* Init AFullOprtnstcCrdMask */
    mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
           (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
           (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
           (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
           (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
           (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
           (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
           (QM_OPPOR_PQ_EMPTY_DEF << QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
    STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

    /* Enable/disable PF RL */
    ecore_enable_pf_rl(p_hwfn, pf_rl_en);

    /* Enable/disable PF WFQ */
    ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);

    /* Enable/disable VPORT RL */
    ecore_enable_vport_rl(p_hwfn, vport_rl_en);

    /* Enable/disable VPORT WFQ */
    ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);

    /* Init PBF CMDQ line credit */
    ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

    /* Init BTB blocks in PBF */
    ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);

    return 0;
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        u8 port_id,
                        u8 pf_id,
                        u8 max_phys_tcs_per_port,
                        bool is_first_pf,
                        u32 num_pf_cids,
                        u32 num_vf_cids,
                        u32 num_tids,
                        u16 start_pq,
                        u16 num_pf_pqs,
                        u16 num_vf_pqs,
                        u8 start_vport,
                        u8 num_vports,
                        u16 pf_wfq,
                        u32 pf_rl,
                        struct init_qm_pq_params *pq_params,
                        struct init_qm_vport_params *vport_params)
{
    u32 other_mem_size_4kb;
    u8 tc, i;

    other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;

    /* Clear first Tx PQ ID array for each VPORT */
    for (i = 0; i < num_vports; i++)
        for (tc = 0; tc < NUM_OF_TCS; tc++)
            vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

    /* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
    ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids, num_tids, 0);
#endif

    /* Map Tx PQs */
    ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, is_first_pf, num_pf_cids, num_vf_cids,
                            start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);

    /* Init PF WFQ */
    if (pf_wfq)
        if (ecore_pf_wfq_rt_init(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, num_pf_pqs + num_vf_pqs, pq_params))
            return -1;

    /* Init PF RL */
    if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
        return -1;

    /* Set VPORT WFQ */
    if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
        return -1;

    /* Set VPORT RL */
    if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, vport_params))
        return -1;

    return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
                      struct ecore_ptt *p_ptt,
                      u8 pf_id,
                      u16 pf_wfq)
{
    u32 inc_val;

    inc_val = QM_WFQ_INC_VAL(pf_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);

    return 0;
}

int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
                     struct ecore_ptt *p_ptt,
                     u8 pf_id,
                     u32 pf_rl)
{
    u32 inc_val;

    inc_val = QM_RL_INC_VAL(pf_rl);
    if (inc_val > QM_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
    ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

    return 0;
}
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
                         struct ecore_ptt *p_ptt,
                         u16 first_tx_pq_id[NUM_OF_TCS],
                         u16 vport_wfq)
{
    u16 vport_pq_id;
    u32 inc_val;
    u8 tc;

    inc_val = QM_WFQ_INC_VAL(vport_wfq);
    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
        return -1;
    }

    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        vport_pq_id = first_tx_pq_id[tc];
        if (vport_pq_id != QM_INVALID_PQ_ID) {
            ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
        }
    }

    return 0;
}

int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        u8 vport_id,
                        u32 vport_rl)
{
    u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;

    if (vport_id >= max_qm_global_rls) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
        return -1;
    }

    inc_val = QM_RL_INC_VAL(vport_rl);
    if (inc_val > QM_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
        return -1;
    }

    ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
    ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

    return 0;
}
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            bool is_release_cmd,
                            bool is_tx_pq,
                            u16 start_pq,
                            u16 num_pqs)
{
    u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = {0};
    u32 pq_mask = 0, last_pq, pq_id;

    last_pq = start_pq + num_pqs - 1;

    /* Set command's PQ type */
    QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

    /* Go over requested PQs */
    for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {

        /* Set PQ bit in mask (stop command only) */
        if (!is_release_cmd)
            pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

        /* If last PQ or end of PQ mask, write command */
        if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) {
            QM_CMD_SET_FIELD(cmd_arr, (u32)QM_STOP_CMD, PAUSE_MASK, pq_mask);
            QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH);
            if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], cmd_arr[1]))
                return false;
            pq_mask = 0;
        }
    }

    return true;
}
#ifndef UNUSED_HSI_FUNC

/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600

/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight,mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))

/* NIG: RL constants */

/* Byte base type value */
#define NIG_RL_BASE_TYPE 1

/* Period in us */
#define NIG_RL_PERIOD 1

/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)

/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)

#define NIG_RL_MAX_VAL(inc_val,mtu) (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
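
/* Worked example (illustrative, not in the original source): for a 10000
 * mbps rate, NIG_RL_INC_VAL(10000) = 10000 * 1 / 8 = 1250 bytes of credit
 * per 1us period, and NIG_RL_MAX_VAL caps the bucket at twice the larger of
 * the increment and the MTU.
 */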
/* NIG: packet priority configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct init_ets_req* req,
                        bool is_lb)
{
    u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
    u32 tc_bound_base_addr, tc_bound_addr_diff;
    u8 sp_tc_map = 0, wfq_tc_map = 0;
    u8 tc, num_tc, tc_client_offset;

    num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
    tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
    min_weight = 0xffffffff;
    tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
    tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 - NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
                                  NIG_REG_TX_ARB_CREDIT_WEIGHT_1 - NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
    tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
    tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
                                 NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;

    for (tc = 0; tc < num_tc; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];

        /* Update SP map */
        if (tc_req->use_sp)
            sp_tc_map |= (1 << tc);

        if (!tc_req->use_wfq)
            continue;

        /* Update WFQ map */
        wfq_tc_map |= (1 << tc);

        /* Find minimal weight */
        if (tc_req->weight < min_weight)
            min_weight = tc_req->weight;
    }

    /* Write SP map */
    ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : NIG_REG_TX_ARB_CLIENT_IS_STRICT, (sp_tc_map << tc_client_offset));

    /* Write WFQ map */
    ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ, (wfq_tc_map << tc_client_offset));

    /* Write WFQ weights */
    for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];
        u32 byte_weight;

        if (!tc_req->use_wfq)
            continue;

        /* Translate weight to bytes */
        byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

        /* Write WFQ weight */
        ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr + tc_weight_addr_diff * tc_client_offset, byte_weight);

        /* Write WFQ upper bound */
        ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr + tc_bound_addr_diff * tc_client_offset, NIG_ETS_UP_BOUND(byte_weight, req->mtu));
    }
}
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
                          struct ecore_ptt *p_ptt,
                          struct init_nig_lb_rl_req* req)
{
    u32 ctrl, inc_val, reg_offset;
    u8 tc;

    /* Disable global MAC+LB RL */
    ctrl = NIG_RL_BASE_TYPE << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
    ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);

    /* Configure and enable global MAC+LB RL */
    if (req->lb_mac_rate) {

        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
    }

    /* Disable global LB-only RL */
    ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
    ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);

    /* Configure and enable global LB-only RL */
    if (req->lb_rate) {

        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->lb_rate);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
    }

    /* Per-TC RLs */
    for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {

        /* Disable TC RL */
        ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);

        /* Configure and enable TC RL */
        if (!req->tc_rate[tc])
            continue;

        /* Configure */
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + reg_offset, NIG_RL_PERIOD_CLK_25M);
        inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + reg_offset, inc_val);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));

        /* Enable */
        ctrl |= 1 << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
        ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
    }
}
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               struct init_nig_pri_tc_map_req* req)
{
    u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
    u32 pri_tc_mask = 0;
    u8 pri, tc;

    for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
        if (!req->pri[pri].valid)
            continue;

        pri_tc_mask |= (req->pri[pri].tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
        tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
    }

    /* Write priority -> TC mask */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);

    /* Write TC -> priority mask */
    for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
        ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, tc_pri_mask[tc]);
        ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, tc_pri_mask[tc]);
    }
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* PRS: ETS configuration constants */
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight,mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))

void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt,
                        struct init_ets_req* req)
{
    u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
    u8 tc, sp_tc_map = 0, wfq_tc_map = 0;

    tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
    tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 - PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;

    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];

        /* Update SP map */
        if (tc_req->use_sp)
            sp_tc_map |= (1 << tc);

        if (!tc_req->use_wfq)
            continue;

        /* Update WFQ map */
        wfq_tc_map |= (1 << tc);

        /* Find minimal weight */
        if (tc_req->weight < min_weight)
            min_weight = tc_req->weight;
    }

    /* Write SP map */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);

    /* Write WFQ map */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, wfq_tc_map);

    /* Write WFQ weights */
    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        struct init_ets_tc_req *tc_req = &req->tc_req[tc];
        u32 byte_weight;

        if (!tc_req->use_wfq)
            continue;

        /* Translate weight to bytes */
        byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;

        /* Write WFQ weight */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc * tc_weight_addr_diff, byte_weight);

        /* Write WFQ upper bound */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight, req->mtu));
    }
}

#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
#define BRB_HYST_BYTES 10240
#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
1128 /* Temporary big RAM allocation - should be updated */
1129 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1130 struct ecore_ptt *p_ptt,
1131 struct init_brb_ram_req* req)
1133 u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1134 u32 active_port_blocks, reg_offset = 0;
1135 u8 port, active_ports = 0;
1137 tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
1138 min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
1139 total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 : BRB_TOTAL_RAM_BLOCKS_BB;
1141 /* Find number of active ports */
1142 for (port = 0; port < MAX_NUM_PORTS; port++)
1143 if (req->num_active_tcs[port])
1146 active_port_blocks = (u32)(total_blocks / active_ports);
1148 for (port = 0; port < req->max_ports_per_engine; port++) {
1149 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1150 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1151 u32 tc_guaranteed_blocks;
1154 /* Calculate per-port sizes */
1155 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
1156 port_blocks = req->num_active_tcs[port] ? active_port_blocks : 0;
1157 port_guaranteed_blocks = req->num_active_tcs[port] * tc_guaranteed_blocks;
1158 port_shared_blocks = port_blocks - port_guaranteed_blocks;
1159 full_xoff_th = req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
1160 full_xon_th = full_xoff_th + min_pkt_size_blocks;
1161 pause_xoff_th = tc_headroom_blocks;
1162 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
1164 /* Init total size per port */
1165 ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, port_blocks);
1167 /* Init shared size per port */
1168 ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, port_shared_blocks);
1170 for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1171 /* Clear init values for non-active TCs */
1172 if (tc == req->num_active_tcs[port]) {
1173 tc_guaranteed_blocks = 0;
1180 /* Init guaranteed size per TC */
1181 ecore_wr(p_hwfn, p_ptt, BRB_REG_TC_GUARANTIED_0 + reg_offset, tc_guaranteed_blocks);
1182 ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset, BRB_HYST_BLOCKS);
1184 /* Init pause/full thresholds per physical TC - for
1187 ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
1188 ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
1189 ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
1190 ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
1192 /* Init pause/full thresholds per physical TC - for
1195 ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
1196 ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
1197 ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
1198 ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
1203 #endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC

/* In MF, should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt, u32 ethType)
{
    /* Update PRS register */
    STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);

    /* Update NIG register */
    STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);

    /* Update PBF register */
    STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}

/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt, u32 ethType)
{
    /* Update DORQ register */
    STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}

#endif /* UNUSED_HSI_FUNC */
#define SET_TUNNEL_TYPE_ENABLE_BIT(var,offset,enable) \
    var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               u16 dest_port)
{
    /* Update PRS register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);

    /* Update PBF register */
    ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            bool vxlan_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) /* TODO: handle E5 init */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);

    /* Update NIG register */
    reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT, vxlan_enable);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    /* Update DORQ register */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
}
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
                          struct ecore_ptt *p_ptt,
                          bool eth_gre_enable,
                          bool ip_gre_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) /* TODO: handle E5 init */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);

    /* Update NIG register */
    reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    /* Update DORQ registers */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
}
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt,
                                u16 dest_port)
{
    /* Update PRS register */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);

    /* Update PBF register */
    ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
                             struct ecore_ptt *p_ptt,
                             bool eth_geneve_enable,
                             bool ip_geneve_enable)
{
    u32 reg_val;

    /* Update PRS register */
    reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, eth_geneve_enable);
    SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val) /* TODO: handle E5 init */
        ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);

    /* Update NIG register */
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

    /* EDPM with geneve tunnel not supported in BB */
    if (ECORE_IS_BB_B0(p_hwfn->p_dev))
        return;

    /* Update DORQ registers */
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, eth_geneve_enable ? 1 : 0);
    ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0);
}
#ifndef UNUSED_HSI_FUNC

#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt,
                                u16 pf_id)
{
    union gft_cam_line_union cam_line;
    struct gft_ram_line ram_line;
    u32 i, *ram_line_ptr;

    ram_line_ptr = (u32*)&ram_line;

    /* Stop using gft logic, disable gft search */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
    ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);

    /* Clean ram & cam for next rfs/gft session */

    /* Zero camline */
    OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);

    /* Zero ramline */
    OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));

    /* Each iteration write to reg */
    for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
        ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));
}
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt)
{
    u32 rfs_cm_hdr_event_id;

    /* Set RFS event ID to be awakened in Tstorm by PRS */
    rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
    rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
    rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
    ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
                               struct ecore_ptt *p_ptt,
                               u16 pf_id,
                               bool tcp,
                               bool udp,
                               bool ipv4,
                               bool ipv6)
{
    u32 rfs_cm_hdr_event_id, *ram_line_ptr;
    union gft_cam_line_union cam_line;
    struct gft_ram_line ram_line;
    u32 i;

    rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
    ram_line_ptr = (u32*)&ram_line;

    if (!ipv6 && !ipv4)
        DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6\n");
    if (!tcp && !udp)
        DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of - udp or tcp\n");

    /* Set RFS event ID to be awakened in Tstorm by PRS */
    rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
    rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
    ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);

    /* Configure Registers for RFS mode */

    /* Enable gft search */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);

    /* Do not load context only cid in PRS on match. */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);

    /* Cam line is now valid!! */
    cam_line.cam_line_mapped.camline = 0;
    SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_VALID, 1);

    /* Filters are per PF!! */
    SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
    SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);

    if (!(tcp && udp)) {
        SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
        if (tcp)
            SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
        else
            SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
    }

    if (!(ipv4 && ipv6)) {
        SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
        if (ipv4)
            SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
        else
            SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
    }

    /* Write characteristics to cam */
    ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);
    cam_line.cam_line_mapped.camline = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id);

    /* Write line to RAM - compare to filter 4 tuple */
    ram_line.lo = 0;
    ram_line.hi = 0;
    SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
    SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
    SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
    SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
    SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
    SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);

    /* Each iteration write to reg */
    for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
        ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));

    /* Set default profile so that no filter match will happen */
    ram_line.lo = 0xffffffff;
    ram_line.hi = 0x3ff;

    for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
        ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*PRS_GFT_CAM_LINES_NO_MATCH + i*REG_SIZE, *(ram_line_ptr + i));
}

#endif /* UNUSED_HSI_FUNC */
/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 mode, bool runtime_init)
{
    u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
    u32 msdm_vf_offset_mask;

    if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
        msdm_vf_size_log += 1;
    else if (mode == VF_ZONE_SIZE_MODE_QUAD)
        msdm_vf_size_log += 2;

    msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;

    if (runtime_init) {
        STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
        STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
    }
    else {
        ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
        ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
    }
}
1502 /* Get mstorm statistics for offset by VF zone size mode */
1503 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, u16 stat_cnt_id, u16 vf_zone_size_mode)
1505 u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1507 if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && (stat_cnt_id > MAX_NUM_PFS)) {
1508 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1509 offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
1510 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1511 offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8 vf_queue_id, u16 vf_zone_size_mode)
{
    u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);

    if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
        if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
            offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
        else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
            offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
    }

    return offset;
}
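
/* Illustrative example (not in the original source): in QUAD mode each VF
 * zone grows from (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) bytes to four times
 * that, so vf_id 2 starts 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * 2
 * bytes beyond its default-mode offset, as computed above.
 */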
#define CRC8_INIT_VALUE 0xFF
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];

/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn, u8 conn_type,
                                         u8 region, u32 cid)
{
    const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;

    static u8 crc8_table_valid; /* automatically initialized to 0 */
    u8 crc, validation_byte = 0;
    u32 validation_string = 0;
    u32 data_to_crc;

    if (crc8_table_valid == 0) {
        OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
        crc8_table_valid = 1;
    }

    /* The CRC is calculated on the String-to-compress:
     * [31:8] = {CID[31:20],CID[11:0]}
     * [7:4]  = Region
     * [3:0]  = Type
     */
    if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
        validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);

    if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
        validation_string |= ((region & 0xF) << 4);

    if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
        validation_string |= (conn_type & 0xF);

    /* Convert to big-endian and calculate CRC8 */
    data_to_crc = OSAL_BE32_TO_CPU(validation_string);

    crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);

    /* The validation byte [7:0] is composed:
     * for type A validation
     * [7]   = active configuration bit
     * [6:0] = crc[6:0]
     *
     * for type B validation
     * [7]   = active configuration bit
     * [6:3] = connection_type[3:0]
     * [2:0] = crc[2:0]
     */
    validation_byte |= ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;

    if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
        validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
    else
        validation_byte |= crc & 0x7F;

    return validation_byte;
}
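
/* Illustrative composition (not in the original source): with the active bit
 * set, type B validation, conn_type = 1 and crc = 0x5A, the byte is
 * (1 << 7) | ((1 & 0xF) << 3) | (0x5A & 0x7) = 0x80 | 0x08 | 0x02 = 0x8A.
 */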
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
                                       u16 ctx_size, u8 ctx_type, u32 cid)
{
    u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;

    p_ctx = (u8* const)p_ctx_mem;
    x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
    t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
    u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
    *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
    *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
                                    u16 ctx_size, u8 ctx_type, u32 tid)
{
    u8 *p_ctx, *region1_val_ptr;

    p_ctx = (u8* const)p_ctx_mem;
    region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
    u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
    u8 x_val, t_val, u_val;

    p_ctx = (u8* const)p_ctx_mem;
    x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
    t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
    u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];

    x_val = *x_val_ptr;
    t_val = *t_val_ptr;
    u_val = *u_val_ptr;

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *x_val_ptr = x_val;
    *t_val_ptr = t_val;
    *u_val_ptr = u_val;
}
/* Memset task context to 0 while preserving validation bytes */
void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
    u8 *p_ctx, *region1_val_ptr;
    u8 region1_val;

    p_ctx = (u8* const)p_ctx_mem;
    region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];

    region1_val = *region1_val_ptr;

    OSAL_MEMSET(p_ctx, 0, ctx_size);

    *region1_val_ptr = region1_val;
}
/* Enable and configure context validation */
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
    u32 ctx_validation;

    /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
    ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
    ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);

    /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
    ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
    ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);

    /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
    ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
    ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}