1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc. 
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27
28 /*
29  * File : ecore_init_fw_funcs.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "bcm_osal.h"
35 #include "ecore_hw.h"
36 #include "ecore_init_ops.h"
37 #include "reg_addr.h"
38 #include "ecore_rt_defs.h"
39 #include "ecore_hsi_common.h"
40 #include "ecore_hsi_init_func.h"
41 #include "ecore_hsi_eth.h"
42 #include "ecore_hsi_init_tool.h"
43 #include "ecore_iro.h"
44 #include "ecore_init_fw_funcs.h"
45
46 #define CDU_VALIDATION_DEFAULT_CFG 61
47
48 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
49         { 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */
50         { 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */
51         { 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */
52 };
53 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
54         { 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
55 };
56
57 /* General constants */
58 #define QM_PQ_MEM_4KB(pq_size)                  (pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
59 #define QM_PQ_SIZE_256B(pq_size)                (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
60 #define QM_INVALID_PQ_ID                0xffff
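/* Illustrative sizing example (not part of the driver): assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes (the actual value comes from the HSI headers),
 * QM_PQ_MEM_4KB(1023) = DIV_ROUND_UP(1024 * 4, 0x1000) = 1, i.e. one 4KB page
 * for the PQ, and QM_PQ_SIZE_256B(1024) = DIV_ROUND_UP(1024, 0x100) - 1 = 3.
 * Both macros return 0 for an empty PQ (pq_size == 0).
 */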
61
62 /* Feature enable */
63 #define QM_BYPASS_EN                    1
64 #define QM_BYTE_CRD_EN                  1
65
66 /* Other PQ constants */
67 #define QM_OTHER_PQS_PER_PF             4
68
69 /* VOQ constants */
70 #define QM_E5_NUM_EXT_VOQ               (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
71
72 /* WFQ constants: */
73
74 /* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
75 #define QM_WFQ_UPPER_BOUND              62500000 
76
77 /* Bit of VOQ in WFQ VP PQ map */
78 #define QM_WFQ_VP_PQ_VOQ_SHIFT          0
79
80 /* Bit of PF in WFQ VP PQ map */
81 #define QM_WFQ_VP_PQ_PF_E4_SHIFT        5
82 #define QM_WFQ_VP_PQ_PF_E5_SHIFT        6
83
84 /* 0x9000 = 4*9*1024 */
85 #define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000) 
86
87 /* 0.7 * upper bound (62500000) */
88 #define QM_WFQ_MAX_INC_VAL              43750000
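/* Illustrative WFQ arithmetic (assumed numbers, for clarity only): a weight of
 * 1 yields an increment of 0x9000 = 36864 credit units, so the largest weight
 * accepted by ecore_pf_wfq_rt_init() is roughly
 * QM_WFQ_MAX_INC_VAL / 0x9000 ~= 1186.
 */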
89
90 /* Number of VOQs in E5 QmWfqCrd register */
91 #define QM_WFQ_CRD_E5_NUM_VOQS  16
92
93 /* RL constants: */
94  
95 /* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
96 #define QM_RL_UPPER_BOUND               62500000
97
98 /* Period in us */
99 #define QM_RL_PERIOD                    5
100
101 /* Period in 25MHz cycles */
102 #define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD) 
103
104 /* 0.7 * upper bound (62500000) */
105 #define QM_RL_MAX_INC_VAL               43750000
106
107 /* RL increment value - rate is specified in mbps. the factor of 1.01 was
108  * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
109  * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
110  * although the credit increment value was the correct one and FW calculated
111  * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
112  * this point.
113  */
114 #define QM_RL_INC_VAL(rate)                     OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * QM_RL_PERIOD * 101)    / (8 * 100)), 1)
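/* Illustrative RL arithmetic (assumed rate, for clarity only): for a 25 Gbps
 * limit, QM_RL_INC_VAL(25000) = (25000 * QM_RL_PERIOD * 101) / 800 = 15781
 * credit units added every QM_RL_PERIOD microseconds, already including the
 * empirical 1.01 factor described above.
 */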
115
116 /* AFullOprtnstcCrdMask constants */
117 #define QM_OPPOR_LINE_VOQ_DEF           1
118 #define QM_OPPOR_FW_STOP_DEF            0
119 #define QM_OPPOR_PQ_EMPTY_DEF           1
120
121 /* Command Queue constants: */
122
123 /* Pure LB CmdQ lines (+spare) */
124 #define PBF_CMDQ_PURE_LB_LINES          150
125
126 #define PBF_CMDQ_LINES_E5_RSVD_RATIO    8
127
128 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq)               (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + ext_voq     * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET     - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
129
130 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq)   (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + ext_voq        * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET        - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
131
132 #define QM_VOQ_LINE_CRD(pbf_cmd_lines)          ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
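/* Illustrative line-credit example (for clarity only): the pure LB VOQ gets
 * PBF_CMDQ_PURE_LB_LINES = 150 lines, so
 * QM_VOQ_LINE_CRD(150) = ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT,
 * i.e. a credit of 292 with the register sign bit set, as written by
 * ecore_cmdq_lines_voq_rt_init() below.
 */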
133
134 /* BTB: blocks constants (block size = 256B) */
135
136 /* 256B blocks in 9700B packet */
137 #define BTB_JUMBO_PKT_BLOCKS            38
138
139 /* Headroom per-port */
140 #define BTB_HEADROOM_BLOCKS             BTB_JUMBO_PKT_BLOCKS
141 #define BTB_PURE_LB_FACTOR              10
142
143 /* Factored (hence really 0.7) */
144 #define BTB_PURE_LB_RATIO               7
145
146 /* QM stop command constants */
147 #define QM_STOP_PQ_MASK_WIDTH           32
148 #define QM_STOP_CMD_ADDR                2
149 #define QM_STOP_CMD_STRUCT_SIZE         2
150 #define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
151 #define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
152 #define QM_STOP_CMD_PAUSE_MASK_MASK     -1
153 #define QM_STOP_CMD_GROUP_ID_OFFSET     1
154 #define QM_STOP_CMD_GROUP_ID_SHIFT      16
155 #define QM_STOP_CMD_GROUP_ID_MASK       15
156 #define QM_STOP_CMD_PQ_TYPE_OFFSET      1
157 #define QM_STOP_CMD_PQ_TYPE_SHIFT       24
158 #define QM_STOP_CMD_PQ_TYPE_MASK        1
159 #define QM_STOP_CMD_MAX_POLL_COUNT      100
160 #define QM_STOP_CMD_POLL_PERIOD_US      500
161
162 /* QM command macros */
163 #define QM_CMD_STRUCT_SIZE(cmd)           cmd##_STRUCT_SIZE
164 #define QM_CMD_SET_FIELD(var, cmd, field, value)        SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
165
166 #define QM_INIT_TX_PQ_MAP(map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr)    OSAL_MEMSET(&map, 0, sizeof(map));      SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1);  SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid);   SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id);   SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id);         SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq);         SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr);        STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map))
167
168 #define WRITE_PQ_INFO_TO_RAM                                                    1
169 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
170 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id)                                  XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4
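/* Illustrative PQ info packing (assumed field values, for clarity only):
 * PQ_INFO_ELEMENT(5, 2, 3, 1, 1, 7) =
 *   5 | (2 << 12) | (3 << 16) | (1 << 20) | (1 << 22) | (7 << 24) = 0x07532005,
 * i.e. VP PQ 5, PF 2, TC 3, port 1, RL valid, RL ID 7 packed into a single u32
 * that is written to the XSEM internal RAM at PQ_INFO_RAM_GRC_ADDRESS(pq_id).
 */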
171
172 /******************** INTERNAL IMPLEMENTATION *********************/
173
174 /* Returns the external VOQ number */
175 static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
176                                                         u8 port_id,
177                                                         u8 tc,
178                                                         u8 max_phys_tcs_per_port)
179 {
180         if (tc == PURE_LB_TC)
181                 return NUM_OF_PHYS_TCS * (ECORE_IS_E5(p_hwfn->p_dev) ? MAX_NUM_PORTS_E5 : MAX_NUM_PORTS_BB) + port_id;
182         else
183                 return port_id * (ECORE_IS_E5(p_hwfn->p_dev) ? NUM_OF_PHYS_TCS : max_phys_tcs_per_port) + tc;
184 }
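/* Illustrative VOQ mapping (assumed HSI constants, for clarity only): on E4
 * with max_phys_tcs_per_port = 4, port 1 / TC 2 maps to external VOQ
 * 1 * 4 + 2 = 6. Assuming NUM_OF_PHYS_TCS = 8 and MAX_NUM_PORTS_BB = 2, the
 * pure LB TC of port 1 maps to VOQ 8 * 2 + 1 = 17, i.e. past all physical
 * VOQs.
 */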
185
186 /* Prepare PF RL enable/disable runtime init values */
187 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
188                                                            bool pf_rl_en)
189 {
190         STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
191         if (pf_rl_en) {
192                 u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
193                 u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
194
195                 /* Enable RLs for all VOQs */
196                 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask);
197 #ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
198                 if (num_ext_voqs >= 32)
199                         STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, (u32)(voq_bit_mask >> 32));
200 #endif
201
202                 /* Write RL period */
203                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
204                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
205
206                 /* Set credit threshold for QM bypass flow */
207                 if (QM_BYPASS_EN)
208                         STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, QM_RL_UPPER_BOUND);
209         }
210 }
211
212 /* Prepare PF WFQ enable/disable runtime init values */
213 static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn,
214                                                                 bool pf_wfq_en)
215 {
216         STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
217
218         /* Set credit threshold for QM bypass flow */
219         if (pf_wfq_en && QM_BYPASS_EN)
220                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
221 }
222
223 /* Prepare VPORT RL enable/disable runtime init values */
224 static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
225                                                                   bool vport_rl_en)
226 {
227         STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0);
228         if (vport_rl_en) {
229
230                 /* Write RL period (use timer 0 only) */
231                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
232                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
233
234                 /* Set credit threshold for QM bypass flow */
235                 if (QM_BYPASS_EN)
236                         STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, QM_RL_UPPER_BOUND);
237         }
238 }
239
240 /* Prepare VPORT WFQ enable/disable runtime init values */
241 static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
242                                                                    bool vport_wfq_en)
243 {
244         STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 1 : 0);
245
246         /* Set credit threshold for QM bypass flow */
247         if (vport_wfq_en && QM_BYPASS_EN)
248                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, QM_WFQ_UPPER_BOUND);
249 }
250
251 /* Prepare runtime init values to allocate PBF command queue lines for
252  * the specified VOQ.
253  */
254 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
255                                                                                  u8 ext_voq,
256                                                                                  u16 cmdq_lines)
257 {
258         u32 qm_line_crd;        
259                 
260         qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
261
262         OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines);
263         STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
264         STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
265 }
266
267 /* Prepare runtime init values to allocate PBF command queue lines. */
268 static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
269                                                                          u8 max_ports_per_engine,
270                                                                          u8 max_phys_tcs_per_port,
271                                                                          struct init_qm_port_params port_params[MAX_NUM_PORTS])
272 {
273         u8 tc, ext_voq, port_id, num_tcs_in_port;
274         u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
275
276         /* Clear PBF lines of all VOQs */
277         for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
278                 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
279
280         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
281                 u16 phys_lines, phys_lines_per_tc;
282
283                 if (!port_params[port_id].active)
284                         continue;
285
286                 /* Find number of command queue lines to divide between the
287                  * active physical TCs. In E5, 1/8 of the lines are reserved.
288                  * In addition, the lines for the pure LB TC are subtracted.
289                  */
290                 phys_lines = port_params[port_id].num_pbf_cmd_lines;
291                 if (ECORE_IS_E5(p_hwfn->p_dev))
292                         phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
293                 phys_lines -= PBF_CMDQ_PURE_LB_LINES;
294
295                 /* Find #lines per active physical TC */
296                 num_tcs_in_port = 0;
297                 for (tc = 0; tc < max_phys_tcs_per_port; tc++)
298                         if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
299                                 num_tcs_in_port++;
300                 phys_lines_per_tc = phys_lines / num_tcs_in_port;
301
302                 /* Init registers per active TC */
303                 for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
304                         ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
305                         if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
306                                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc);
307                 }
308
309                 /* Init registers for pure LB TC */
310                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
311                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, PBF_CMDQ_PURE_LB_LINES);
312         }
313 }
314
315 /* Prepare runtime init values to allocate guaranteed BTB blocks for the
316  * specified port. The guaranteed BTB space is divided between the TCs as
317  * follows (shared space is currently not used):
318  * 1. Parameters:
319  *    B - BTB blocks for this port
320  *    C - Number of physical TCs for this port
321  * 2. Calculation:
322  *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
323  *       headroom.
324  *    b. B = B - 38 (remainder after global headroom allocation).
325  *    c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
326  *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
327  *    e. B/C blocks are allocated for each physical TC.
328  * Assumptions:
329  * - MTU is up to 9700 bytes (38 blocks)
330  * - All TCs are considered symmetrical (same rate and packet size)
331  * - No optimization for lossy TC (all are considered lossless). Shared space
332  *   is not enabled and allocated for each TC.
333  */
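/* Illustrative trace of the code below (assumed numbers, for clarity only):
 * a port with B = 1000 BTB blocks and C = 4 active physical TCs:
 *   usable_blocks  = 1000 - 38 = 962
 *   pure_lb_blocks = MAX(38, ((962 * 10) / (4 * 10 + 7)) / 10) = MAX(38, 20) = 38
 *   phys_blocks    = (962 - 38) / 4 = 231 blocks per physical TC
 */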
334 static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
335                                                                          u8 max_ports_per_engine,
336                                                                          u8 max_phys_tcs_per_port,
337                                                                          struct init_qm_port_params port_params[MAX_NUM_PORTS])
338 {
339         u32 usable_blocks, pure_lb_blocks, phys_blocks;
340         u8 tc, ext_voq, port_id, num_tcs_in_port;
341
342         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
343                 if (!port_params[port_id].active)
344                         continue;
345
346                 /* Subtract headroom blocks */
347                 usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS;
348
349                 /* Find blocks per physical TC. Use a factor to avoid
350                  * floating-point arithmetic.
351                  */
352                 num_tcs_in_port = 0;
353                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
354                         if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
355                                 num_tcs_in_port++;
356
357                 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
358                 pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS, pure_lb_blocks / BTB_PURE_LB_FACTOR);
359                 phys_blocks = (usable_blocks - pure_lb_blocks) / num_tcs_in_port;
360
361                 /* Init physical TCs */
362                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
363                         if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
364                                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
365                                 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), phys_blocks);
366                         }
367                 }
368
369                 /* Init pure LB TC */
370                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
371                 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
372         }
373 }
374
375 /* Prepare Tx PQ mapping runtime init values for the specified PF */
376 static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
377                                     struct ecore_ptt *p_ptt,
378                                     u8 port_id,
379                                     u8 pf_id,
380                                     u8 max_phys_tcs_per_port,
381                                     u32 num_pf_cids,
382                                     u32 num_vf_cids,
383                                     u16 start_pq,
384                                     u16 num_pf_pqs,
385                                     u16 num_vf_pqs,
386                                     u8 start_vport,
387                                     u32 base_mem_addr_4kb,
388                                     struct init_qm_pq_params *pq_params,
389                                     struct init_qm_vport_params *vport_params)
390 {
391         /* A bit per Tx PQ indicating if the PQ is associated with a VF */
392         u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
393         u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
394         u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
395         u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
396
397         num_pqs = num_pf_pqs + num_vf_pqs;
398
399         first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
400         last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
401
402         pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
403         vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
404         mem_addr_4kb = base_mem_addr_4kb;
405
406         /* Set mapping from PQ group to PF */
407         for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
408                 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));
409
410         /* Set PQ sizes */
411         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(num_pf_cids));
412         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, QM_PQ_SIZE_256B(num_vf_cids));
413
414         /* Go over all Tx PQs */
415         for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
416                 u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
417                 u8 ext_voq, vport_id_in_pf;
418                 bool is_vf_pq, rl_valid;
419                 u16 first_tx_pq_id;
420
421                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
422                 is_vf_pq = (i >= num_pf_pqs);
423                 rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < max_qm_global_rls;
424
425                 /* Update first Tx PQ of VPORT/TC */
426                 vport_id_in_pf = pq_params[i].vport_id - start_vport;
427                 first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
428                 if (first_tx_pq_id == QM_INVALID_PQ_ID) {
429                         u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id << (ECORE_IS_E5(p_hwfn->p_dev) ? QM_WFQ_VP_PQ_PF_E5_SHIFT : QM_WFQ_VP_PQ_PF_E4_SHIFT));
430
431                         /* Create new VP PQ */
432                         vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
433                         first_tx_pq_id = pq_id;
434
435                         /* Map VP PQ to VOQ and PF */
436                         STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, map_val);
437                 }
438
439                 /* Check RL ID */
440                 if (pq_params[i].rl_valid && pq_params[i].vport_id >= max_qm_global_rls)
441                         DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
442
443                 /* Prepare PQ map entry */
444                 if (ECORE_IS_E5(p_hwfn->p_dev)) {
445                         struct qm_rf_pq_map_e5 tx_pq_map;
446                         QM_INIT_TX_PQ_MAP(tx_pq_map, E5, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
447                 }
448                 else {
449                         struct qm_rf_pq_map_e4 tx_pq_map;
450                         QM_INIT_TX_PQ_MAP(tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
451                 }
452
453                 /* Set base address */
454                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);
455
456                 
457                 if (WRITE_PQ_INFO_TO_RAM != 0)
458                 {
459                         u32 pq_info = 0;
460                         pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, pq_params[i].tc_id, port_id, rl_valid ? 1 : 0, rl_valid ? pq_params[i].vport_id : 0);
461                         ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info);
462                 }
463
464                 /* If VF PQ, add indication to PQ VF mask */
465                 if (is_vf_pq) {
466                         tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
467                         mem_addr_4kb += vport_pq_mem_4kb;
468                 }
469                 else {
470                         mem_addr_4kb += pq_mem_4kb;
471                 }
472         }
473
474         /* Store Tx PQ VF mask to size select register */
475         for (i = 0; i < num_tx_pq_vf_masks; i++)
476                 if (tx_pq_vf_mask[i])
477                         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]);
478 }
479
480 /* Prepare Other PQ mapping runtime init values for the specified PF */
481 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
482                                        u8 pf_id,
483                                        u32 num_pf_cids,
484                                        u32 num_tids,
485                                        u32 base_mem_addr_4kb)
486 {
487         u32 pq_size, pq_mem_4kb, mem_addr_4kb;
488         u16 i, pq_id, pq_group;
489
490         /* A single other PQ group is used in each PF, where PQ group i is used
491          * in PF i.
492          */
493         pq_group = pf_id;
494         pq_size = num_pf_cids + num_tids;
495         pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
496         mem_addr_4kb = base_mem_addr_4kb;
497
498         /* Map PQ group to PF */
499         STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id));
500
501         /* Set PQ sizes */       
502         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size));
503
504         /* Set base address */
505         for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
506                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb);
507                 mem_addr_4kb += pq_mem_4kb;
508         }
509 }
510
511 /* Prepare PF WFQ runtime init values for the specified PF.
512  * Return -1 on error.
513  */
514 static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
515                                                                 u8 port_id,
516                                                                 u8 pf_id,
517                                                                 u16 pf_wfq,
518                                                                 u8 max_phys_tcs_per_port,
519                                                                 u16 num_tx_pqs,
520                                                                 struct init_qm_pq_params *pq_params)
521 {
522         u32 inc_val, crd_reg_offset;
523         u8 ext_voq;
524         u16 i;
525
526         inc_val = QM_WFQ_INC_VAL(pf_wfq);
527         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
528                 DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
529                 return -1;
530         }
531
532         for (i = 0; i < num_tx_pqs; i++) {
533                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
534                 crd_reg_offset = ECORE_IS_E5(p_hwfn->p_dev) ?
535                         (ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
536                         (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
537                 OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
538                 STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
539                 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
540         }
541
542         return 0;
543 }
544
545 /* Prepare PF RL runtime init values for the specified PF.
546  * Return -1 on error.
547  */
548 static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
549                                                           u8 pf_id,
550                                                           u32 pf_rl)
551 {
552         u32 inc_val;
553         
554         inc_val = QM_RL_INC_VAL(pf_rl);
555         if (inc_val > QM_RL_MAX_INC_VAL) {
556                 DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
557                 return -1;
558         }
559
560         STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
561         STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
562         STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
563
564         return 0;
565 }
566
567 /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
568  * Return -1 on error.
569  */
570 static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
571                                                                 u8 num_vports,
572                                                                 struct init_qm_vport_params *vport_params)
573 {
574         u16 vport_pq_id;
575         u32 inc_val;
576         u8 tc, i;
577
578         /* Go over all PF VPORTs */
579         for (i = 0; i < num_vports; i++) {
580                 if (!vport_params[i].vport_wfq)
581                         continue;
582
583                 inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
584                 if (inc_val > QM_WFQ_MAX_INC_VAL) {
585                         DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
586                         return -1;
587                 }
588
589                 /* Each VPORT can have several VPORT PQ IDs for various TCs */
590                 for (tc = 0; tc < NUM_OF_TCS; tc++) {
591                         vport_pq_id = vport_params[i].first_tx_pq_id[tc];
592                         if (vport_pq_id != QM_INVALID_PQ_ID) {
593                                 STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
594                                 STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val);
595                         }
596                 }
597         }
598
599         return 0;
600 }
601
602 /* Prepare VPORT RL runtime init values for the specified VPORTs.
603  * Return -1 on error.
604  */
605 static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
606                                                                   u8 start_vport,
607                                                                   u8 num_vports,
608                                                                   struct init_qm_vport_params *vport_params)
609 {
610         u8 i, vport_id;
611         u32 inc_val;
612
613         if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
614                 DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
615                 return -1;
616         }
617
618         /* Go over all PF VPORTs */
619         for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
620                 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
621                 if (inc_val > QM_RL_MAX_INC_VAL) {
622                         DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
623                         return -1;
624                 }
625
626                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, (u32)QM_RL_CRD_REG_SIGN_BIT);
627                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
628                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val);
629         }
630
631         return 0;
632 }
633
634 static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
635                                                                            struct ecore_ptt *p_ptt)
636 {
637         u32 reg_val, i;
638
639         for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) {
640                 OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
641                 reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
642         }
643
644         /* Check if timeout while waiting for SDM command ready */
645         if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
646                 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG, "Timeout when waiting for QM SDM command ready signal\n");
647                 return false;
648         }
649
650         return true;
651 }
652
653 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
654                                                           struct ecore_ptt *p_ptt,
655                                                           u32 cmd_addr,
656                                                           u32 cmd_data_lsb,
657                                                           u32 cmd_data_msb)
658 {
659         if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
660                 return false;
661
662         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
663         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
664         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
665         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
666         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
667
668         return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
669 }
670
671
672 /******************** INTERFACE IMPLEMENTATION *********************/
673
674 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
675                          u32 num_vf_cids,
676                          u32 num_tids,
677                          u16 num_pf_pqs,
678                          u16 num_vf_pqs)
679 {
680         return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
681                    QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
682                    QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
683 }
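/* Illustrative caller sketch (assumed counts, not part of the driver): the
 * return value is the number of 4KB pages of PQ memory the PF must reserve:
 *
 *   u32 qm_mem_4kb = ecore_qm_pf_mem_size(num_pf_cids, num_vf_cids, num_tids,
 *                                         num_pf_pqs, num_vf_pqs);
 *   // qm_mem_4kb * 0x1000 bytes cover the PF's Tx PQs, VF PQs and Other PQs
 */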
684
685 int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
686                                                         u8 max_ports_per_engine,
687                                                         u8 max_phys_tcs_per_port,
688                                                         bool pf_rl_en,
689                                                         bool pf_wfq_en,
690                                                         bool vport_rl_en,
691                                                         bool vport_wfq_en,
692                                                         struct init_qm_port_params port_params[MAX_NUM_PORTS])
693 {
694         u32 mask;
695
696         /* Init AFullOprtnstcCrdMask */
697         mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
698                 (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
699                 (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
700                 (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
701                 (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
702                 (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
703                 (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
704                 (QM_OPPOR_PQ_EMPTY_DEF << QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
705         STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
706
707         /* Enable/disable PF RL */
708         ecore_enable_pf_rl(p_hwfn, pf_rl_en);
709
710         /* Enable/disable PF WFQ */
711         ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
712
713         /* Enable/disable VPORT RL */
714         ecore_enable_vport_rl(p_hwfn, vport_rl_en);
715
716         /* Enable/disable VPORT WFQ */
717         ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
718
719         /* Init PBF CMDQ line credit */
720         ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);
721
722         /* Init BTB blocks in PBF */
723         ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, max_phys_tcs_per_port, port_params);
724
725         return 0;
726 }
727
728 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
729                         struct ecore_ptt *p_ptt,
730                         u8 port_id,
731                         u8 pf_id,
732                         u8 max_phys_tcs_per_port,
733                         u32 num_pf_cids,
734                         u32 num_vf_cids,
735                         u32 num_tids,
736                         u16 start_pq,
737                         u16 num_pf_pqs,
738                         u16 num_vf_pqs,
739                         u8 start_vport,
740                         u8 num_vports,
741                         u16 pf_wfq,
742                         u32 pf_rl,
743                         struct init_qm_pq_params *pq_params,
744                         struct init_qm_vport_params *vport_params)
745 {
746         u32 other_mem_size_4kb;
747         u8 tc, i;
748
749         other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
750
751         /* Clear first Tx PQ ID array for each VPORT */
752         for(i = 0; i < num_vports; i++)
753                 for(tc = 0; tc < NUM_OF_TCS; tc++)
754                         vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
755
756         /* Map Other PQs (if any) */
757 #if QM_OTHER_PQS_PER_PF > 0
758         ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
759 #endif
760
761         /* Map Tx PQs */
762         ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
763                                 start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);
764
765         /* Init PF WFQ */
766         if (pf_wfq)
767                 if (ecore_pf_wfq_rt_init(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, num_pf_pqs + num_vf_pqs, pq_params))
768                         return -1;
769
770         /* Init PF RL */
771         if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
772                 return -1;
773
774         /* Set VPORT WFQ */
775         if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
776                 return -1;
777
778         /* Set VPORT RL */
779         if (ecore_vport_rl_rt_init(p_hwfn, start_vport, num_vports, vport_params))
780                 return -1;
781
782         return 0;
783 }
784
785 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
786                                           struct ecore_ptt *p_ptt,
787                                           u8 pf_id,
788                                           u16 pf_wfq)
789 {
790         u32 inc_val;
791         
792         inc_val = QM_WFQ_INC_VAL(pf_wfq);
793         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
794                 DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
795                 return -1;
796         }
797
798         ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
799
800         return 0;
801 }
802
803 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
804                                          struct ecore_ptt *p_ptt,
805                                          u8 pf_id,
806                                          u32 pf_rl)
807 {
808         u32 inc_val;
809         
810         inc_val = QM_RL_INC_VAL(pf_rl);
811         if (inc_val > QM_RL_MAX_INC_VAL) {
812                 DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
813                 return -1;
814         }
815
816         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
817         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
818
819         return 0;
820 }
821
822 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
823                                                  struct ecore_ptt *p_ptt,
824                                                  u16 first_tx_pq_id[NUM_OF_TCS],
825                                                  u16 vport_wfq)
826 {
827         u16 vport_pq_id;
828         u32 inc_val;
829         u8 tc;
830
831         inc_val = QM_WFQ_INC_VAL(vport_wfq);
832         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
833                 DP_NOTICE(p_hwfn, true, "Invalid VPORT WFQ weight configuration\n");
834                 return -1;
835         }
836
837         for (tc = 0; tc < NUM_OF_TCS; tc++) {
838                 vport_pq_id = first_tx_pq_id[tc];
839                 if (vport_pq_id != QM_INVALID_PQ_ID) {
840                         ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
841                 }
842         }
843
844         return 0;
845 }
846
847 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
848                                                 struct ecore_ptt *p_ptt,
849                                                 u8 vport_id,
850                                                 u32 vport_rl)
851 {
852         u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
853
854         if (vport_id >= max_qm_global_rls) {
855                 DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
856                 return -1;
857         }
858
859         inc_val = QM_RL_INC_VAL(vport_rl);
860         if (inc_val > QM_RL_MAX_INC_VAL) {
861                 DP_NOTICE(p_hwfn, true, "Invalid VPORT rate-limit configuration\n");
862                 return -1;
863         }
864
865         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
866         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
867
868         return 0;
869 }
870
871 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
872                                                         struct ecore_ptt *p_ptt,
873                                                         bool is_release_cmd,
874                                                         bool is_tx_pq,
875                                                         u16 start_pq,
876                                                         u16 num_pqs)
877 {
878         u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = {0};
879         u32 pq_mask = 0, last_pq, pq_id;
880
881         last_pq = start_pq + num_pqs - 1;
882
883         /* Set command's PQ type */
884         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
885
886         /* Go over requested PQs */
887         for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
888
889                 /* Set PQ bit in mask (stop command only) */
890                 if (!is_release_cmd)
891                         pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
892
893                 /* If last PQ or end of PQ mask, write command */
894                 if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) {
895                         QM_CMD_SET_FIELD(cmd_arr, (u32)QM_STOP_CMD, PAUSE_MASK, pq_mask);
896                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH);
897                         if (!ecore_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], cmd_arr[1]))
898                                 return false;
899                         pq_mask = 0;
900                 }
901         }
902
903         return true;
904 }
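/* Illustrative stop-command flow (assumed PQ range, for clarity only): calling
 * ecore_send_qm_stop_cmd() with start_pq = 40 and num_pqs = 11 covers PQs
 * 40..50, which all fall in 32-wide group 1 (40 / 32 == 50 / 32 == 1), so a
 * single QM_STOP_CMD is sent with GROUP_ID = 1 and PAUSE_MASK bits 8..18 set
 * (40 % 32 = 8 through 50 % 32 = 18). A release command sends the same
 * commands with an all-zero mask.
 */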
905
906 #ifndef UNUSED_HSI_FUNC
907
908 /* NIG: ETS configuration constants */
909 #define NIG_TX_ETS_CLIENT_OFFSET        4
910 #define NIG_LB_ETS_CLIENT_OFFSET        1
911 #define NIG_ETS_MIN_WFQ_BYTES           1600
912
913 /* NIG: ETS constants */
914 #define NIG_ETS_UP_BOUND(weight,mtu)            (2 * ((weight) > (mtu) ? (weight) : (mtu)))
915
916 /* NIG: RL constants */
917
918 /* Byte base type value */
919 #define NIG_RL_BASE_TYPE                1
920
921 /* Period in us */
922 #define NIG_RL_PERIOD                   1
923
924 /* Period in 25MHz cycles */
925 #define NIG_RL_PERIOD_CLK_25M           (25 * NIG_RL_PERIOD) 
926
927 /* Rate in mbps */
928 #define NIG_RL_INC_VAL(rate)            (((rate) * NIG_RL_PERIOD) / 8)
929
930 #define NIG_RL_MAX_VAL(inc_val,mtu)             (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
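/* Illustrative NIG RL arithmetic (assumed numbers, for clarity only): for a
 * 10 Gbps rate, NIG_RL_INC_VAL(10000) = (10000 * 1) / 8 = 1250 bytes added per
 * 1 us period, and with a 9600-byte MTU the upper bound is
 * NIG_RL_MAX_VAL(1250, 9600) = 2 * 9600 = 19200 bytes.
 */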
931
932 /* NIG: packet priority configuration constants */
933 #define NIG_PRIORITY_MAP_TC_BITS        4
934
935
936 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
937                                                 struct ecore_ptt *p_ptt,
938                                                 struct init_ets_req* req,
939                                                 bool is_lb)
940 {
941         u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
942         u32 tc_bound_base_addr, tc_bound_addr_diff;
943         u8 sp_tc_map = 0, wfq_tc_map = 0;
944         u8 tc, num_tc, tc_client_offset;
945
946         num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
947         tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
948         min_weight = 0xffffffff;
949         tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
950         tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 - NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
951                                                                   NIG_REG_TX_ARB_CREDIT_WEIGHT_1 - NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
952         tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
953         tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
954                                                                  NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 - NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
955
956         for (tc = 0; tc < num_tc; tc++) {
957                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
958
959                 /* Update SP map */
960                 if (tc_req->use_sp)
961                         sp_tc_map |= (1 << tc);
962
963                 if (!tc_req->use_wfq)
964                         continue;
965
966                 /* Update WFQ map */
967                 wfq_tc_map |= (1 << tc);
968                                 
969                 /* Find minimal weight */
970                 if (tc_req->weight < min_weight)
971                         min_weight = tc_req->weight;
972         }
973
974         /* Write SP map */
975         ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : NIG_REG_TX_ARB_CLIENT_IS_STRICT, (sp_tc_map << tc_client_offset));
976
977         /* Write WFQ map */
978         ecore_wr(p_hwfn, p_ptt, is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ, (wfq_tc_map << tc_client_offset));
979
980         /* Write WFQ weights */
981         for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
982                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
983                 u32 byte_weight;
984
985                 if (!tc_req->use_wfq)
986                         continue;
987
988                 /* Translate weight to bytes */
989                 byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;
990
991                 /* Write WFQ weight */
992                 ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr + tc_weight_addr_diff * tc_client_offset, byte_weight);
993
994                 /* Write WFQ upper bound */
995                 ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr + tc_bound_addr_diff * tc_client_offset, NIG_ETS_UP_BOUND(byte_weight, req->mtu));
996         }
997 }
998
999 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
1000                                                   struct ecore_ptt *p_ptt,
1001                                                   struct init_nig_lb_rl_req* req)
1002 {
1003         u32 ctrl, inc_val, reg_offset;
1004         u8 tc;
1005
1006         /* Disable global MAC+LB RL */
1007         ctrl = NIG_RL_BASE_TYPE << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
1008         ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1009
1010         /* Configure and enable global MAC+LB RL */
1011         if (req->lb_mac_rate) {
1012
1013                 /* Configure  */
1014                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
1015                 inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
1016                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, inc_val);
1017                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));
1018
1019                 /* Enable */
1020                 ctrl |= 1 << NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
1021                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1022         }
1023
1024         /* Disable global LB-only RL */
1025         ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
1026         ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1027
1028         /* Configure and enable global LB-only RL */
1029         if (req->lb_rate) {
1030
1031                 /* Configure  */
1032                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
1033                 inc_val = NIG_RL_INC_VAL(req->lb_rate);
1034                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, inc_val);
1035                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE, NIG_RL_MAX_VAL(inc_val, req->mtu));
1036
1037                 /* Enable */
1038                 ctrl |= 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
1039                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1040         }
1041
1042         /* Per-TC RLs */
1043         for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {
1044
1045                 /* Disable TC RL */
1046                 ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
1047                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1048
1049                 /* Configure and enable TC RL */
1050                 if (!req->tc_rate[tc])
1051                         continue;
1052
1053                 /* Configure */
1054                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + reg_offset, NIG_RL_PERIOD_CLK_25M);
1055                 inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
1056                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + reg_offset, inc_val);
1057                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
1058
1059                 /* Enable */
1060                 ctrl |= 1 << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
1061                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1062         }
1063 }
1064
1065 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
1066                                                            struct ecore_ptt *p_ptt,
1067                                                            struct init_nig_pri_tc_map_req* req)
1068 {
1069         u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
1070         u32 pri_tc_mask = 0;
1071         u8 pri, tc;
1072
1073         for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
1074                 if (!req->pri[pri].valid)
1075                         continue;
1076
1077                 pri_tc_mask |= (req->pri[pri].tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
1078                 tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
1079         }
1080
1081         /* Write priority -> TC mask */
1082         ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
1083
1084         /* Write TC -> priority mask */
1085         for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
1086                 ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, tc_pri_mask[tc]);
1087                 ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, tc_pri_mask[tc]);
1088         }
1089 }
1090
1091 #endif /* UNUSED_HSI_FUNC */
1092
1093 #ifndef UNUSED_HSI_FUNC
1094
1095 /* PRS: ETS configuration constants */
1096 #define PRS_ETS_MIN_WFQ_BYTES           1600
1097 #define PRS_ETS_UP_BOUND(weight,mtu)            (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1098
1099
1100 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
1101                                                 struct ecore_ptt *p_ptt,
1102                                                 struct init_ets_req* req)
1103 {
1104         u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
1105         u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
1106
1107         tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
1108         tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 - PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
1109
1110         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1111                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1112
1113                 /* Update SP map */
1114                 if (tc_req->use_sp)
1115                         sp_tc_map |= (1 << tc);
1116
1117                 if (!tc_req->use_wfq)
1118                         continue;
1119
1120                 /* Update WFQ map */
1121                 wfq_tc_map |= (1 << tc);
1122                                 
1123                 /* Find minimal weight */
1124                 if (tc_req->weight < min_weight)
1125                         min_weight = tc_req->weight;
1126         }
1127
1128         /* Write SP map */
1129         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
1130
1131         /* Write WFQ map */
1132         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, wfq_tc_map);
1133
1134         /* Write WFQ weights */
1135         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1136                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1137                 u32 byte_weight;
1138
1139                 if (!tc_req->use_wfq)
1140                         continue;
1141
1142                 /* Translate weight to bytes */
1143                 byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / min_weight;
1144
1145                 /* Write WFQ weight */
1146                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc * tc_weight_addr_diff, byte_weight);
1147
1148                 /* Write WFQ upper bound */
1149                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight, req->mtu));
1150         }
1151 }
1152
1153 #endif /* UNUSED_HSI_FUNC */
1154 #ifndef UNUSED_HSI_FUNC
1155
1156 /* BRB: RAM configuration constants */
1157 #define BRB_TOTAL_RAM_BLOCKS_BB 4800
1158 #define BRB_TOTAL_RAM_BLOCKS_K2 5632
1159 #define BRB_BLOCK_SIZE          128 
1160 #define BRB_MIN_BLOCKS_PER_TC   9
1161 #define BRB_HYST_BYTES          10240
1162 #define BRB_HYST_BLOCKS         (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
1163
1164 /* Temporary big RAM allocation - should be updated */
1165 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1166                                                 struct ecore_ptt *p_ptt,
1167                                                 struct init_brb_ram_req* req)
1168 {
1169         u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1170         u32 active_port_blocks, reg_offset = 0;
1171         u8 port, active_ports = 0;
1172
1173         tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
1174         min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
1175         total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 : BRB_TOTAL_RAM_BLOCKS_BB;
1176
1177         /* Find number of active ports */
1178         for (port = 0; port < MAX_NUM_PORTS; port++)
1179                 if (req->num_active_tcs[port])
1180                         active_ports++;
1181
1182         active_port_blocks = (u32)(total_blocks / active_ports);
1183
1184         for (port = 0; port < req->max_ports_per_engine; port++) {
1185                 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1186                 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1187                 u32 tc_guaranteed_blocks;
1188                 u8 tc;
1189
1190                 /* Calculate per-port sizes */
1191                 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
1192                 port_blocks = req->num_active_tcs[port] ? active_port_blocks : 0;
1193                 port_guaranteed_blocks = req->num_active_tcs[port] * tc_guaranteed_blocks;
1194                 port_shared_blocks = port_blocks - port_guaranteed_blocks;
1195                 full_xoff_th = req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
1196                 full_xon_th = full_xoff_th + min_pkt_size_blocks;
1197                 pause_xoff_th = tc_headroom_blocks;
1198                 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
1199
1200                 /* Init total size per port */
1201                 ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, port_blocks);
1202
1203                 /* Init shared size per port */
1204                 ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, port_shared_blocks);
1205
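                /* reg_offset is not reset between ports, so the per-TC
                 * registers below are indexed by (port * NUM_OF_TCS + tc).
                 */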
1206                 for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1207                         /* Clear init values for non-active TCs */
1208                         if (tc == req->num_active_tcs[port]) {
1209                                 tc_guaranteed_blocks = 0;
1210                                 full_xoff_th = 0;
1211                                 full_xon_th = 0;
1212                                 pause_xoff_th = 0;
1213                                 pause_xon_th = 0;
1214                         }
1215
1216                         /* Init guaranteed size per TC */
1217                         ecore_wr(p_hwfn, p_ptt, BRB_REG_TC_GUARANTIED_0 + reg_offset, tc_guaranteed_blocks);
1218                         ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset, BRB_HYST_BLOCKS);
1219
1220                         /* Init pause/full thresholds per physical TC - for
1221                          * loopback traffic.
1222                          */
1223                         ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
1224                         ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
1225                         ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
1226                         ecore_wr(p_hwfn, p_ptt, BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
1227
1228                         /* Init pause/full thresholds per physical TC - for
1229                          * main traffic.
1230                          */
1231                         ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + reg_offset, full_xoff_th);
1232                         ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + reg_offset, full_xon_th);
1233                         ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + reg_offset, pause_xoff_th);
1234                         ecore_wr(p_hwfn, p_ptt, BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + reg_offset, pause_xon_th);
1235                 }
1236         }
1237 }
1238
1239 #endif /* UNUSED_HSI_FUNC */
1240 #ifndef UNUSED_HSI_FUNC
1241
1242 /* In MF, should be called once per engine to set EtherType of OuterTag */
1243 void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1244 {
1245         /* Update PRS register */
1246         STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
1247
1248         /* Update NIG register */
1249         STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
1250
1251         /* Update PBF register */
1252         STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
1253 }
1254
1255 /* In MF, should be called once per port to set EtherType of OuterTag */
1256 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1257 {
1258         /* Update DORQ register */
1259         STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
1260 }
1261
1262 #endif /* UNUSED_HSI_FUNC */
1263
1264
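/* Set or clear the enable bit at the given bit offset within a tunnel-type
 * enable register value.
 */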
1265 #define SET_TUNNEL_TYPE_ENABLE_BIT(var,offset,enable) var = ((var) & ~(1 << (offset))) | ( (enable) ? (1 << (offset)) : 0)
1266 #define PRS_ETH_TUNN_FIC_FORMAT        -188897008
1267
1268 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
1269         struct ecore_ptt *p_ptt,
1270         u16 dest_port)
1271 {
1272         /* Update PRS register */
1273         ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1274
1275         /* Update NIG register */
1276         ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1277
1278         /* Update PBF register */
1279         ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1280 }
1281
1282 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
1283         struct ecore_ptt *p_ptt,
1284         bool vxlan_enable)
1285 {
1286         u32 reg_val;
1287
1288         /* Update PRS register */
1289         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1290         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable);
1291         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
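        /* The tunnel FIC output format is programmed only while at least one
         * encapsulation type is enabled.
         */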
1292         if (reg_val) /* TODO: handle E5 init */
1293                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);
1294
1295         /* Update NIG register */
1296         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1297         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT, vxlan_enable);
1298         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1299
1300         /* Update DORQ register */
1301         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
1302 }
1303
1304 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
1305         struct ecore_ptt *p_ptt,
1306         bool eth_gre_enable,
1307         bool ip_gre_enable)
1308 {
1309         u32 reg_val;
1310
1311         /* Update PRS register */
1312         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1313         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
1314         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,  ip_gre_enable);
1315         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1316         if (reg_val) /* TODO: handle E5 init */
1317                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);
1318
1319         /* Update NIG register */
1320         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1321         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT, eth_gre_enable);
1322         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,  ip_gre_enable);
1323         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1324
1325         /* Update DORQ registers */
1326         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
1327         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
1328 }
1329
1330 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
1331         struct ecore_ptt *p_ptt,
1332         u16 dest_port)
1334 {
1335         /* Update PRS register */
1336         ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
1337
1338         /* Update NIG register */
1339         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
1340
1341         /* Update PBF register */
1342         ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
1343 }
1344
1345 void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
1346                              struct ecore_ptt *p_ptt,
1347                              bool eth_geneve_enable,
1348                              bool ip_geneve_enable)
1349 {
1350         u32 reg_val;
1351
1352         /* Update PRS register */
1353         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1354         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, eth_geneve_enable);   
1355         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable);
1356         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1357         if (reg_val) /* TODO: handle E5 init */
1358                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, (u32)PRS_ETH_TUNN_FIC_FORMAT);
1359
1360         /* Update NIG register */
1361         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0);
1362         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
1363     
1364         /* EDPM with geneve tunnel not supported in BB */
1365         if (ECORE_IS_BB_B0(p_hwfn->p_dev))
1366                 return;
1367
1368         /* Update DORQ registers */
1369         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, eth_geneve_enable ? 1 : 0);
1370         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0);
1371 }
1372
1373 #ifndef UNUSED_HSI_FUNC
1374
1375 #define T_ETH_PACKET_ACTION_GFT_EVENTID  23
1376 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
1377 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1378 #define PARSER_ETH_CONN_CM_HDR 0
1379 #define CAM_LINE_SIZE sizeof(u32)
1380 #define RAM_LINE_SIZE sizeof(u64) 
1381 #define REG_SIZE sizeof(u32)
1382
1383
1384 void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
1385         struct ecore_ptt *p_ptt,
1386         u16 pf_id)
1387 {
1388         union gft_cam_line_union cam_line;
1389         struct gft_ram_line ram_line;
1390         u32 i, *ram_line_ptr;
1391
1392         ram_line_ptr = (u32*)&ram_line;
1393
1394         /* Stop using gft logic, disable gft search */
1395         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1396         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
1397
1398         /* Clean RAM & CAM for the next RFS/GFT session */
1399
1400         /* Zero CAM line */
1401         OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
1402         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);
1403
1404         /* Zero RAM line */
1405         OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
1406
1407         /* Write the RAM line to the register, one 32-bit word per iteration */
1408         for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
1409                 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));
1410 }
1411
1412
1413 void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
1414         struct ecore_ptt *p_ptt)
1415 {
1416         u32 rfs_cm_hdr_event_id;
1417
1418         /* Set RFS event ID to be awakened in Tstorm by PRS */
1419         rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1420         rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1421         rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1422         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1423 }
1424
1425 void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
1426     struct ecore_ptt *p_ptt,
1427     u16 pf_id,
1428     bool tcp,
1429     bool udp,
1430     bool ipv4,
1431     bool ipv6)
1432 {
1433         u32 rfs_cm_hdr_event_id, *ram_line_ptr;
1434         union gft_cam_line_union cam_line;
1435         struct gft_ram_line ram_line;
1436         int i;
1437
1438         rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1439         ram_line_ptr = (u32*)&ram_line;
1440
1441         if (!ipv6 && !ipv4)
1442                 DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of ipv4 or ipv6\n");
1443         if (!tcp && !udp)
1444                 DP_NOTICE(p_hwfn, true, "set_rfs_mode_enable: must accept at least one of udp or tcp\n");
1445
1446         /* Set RFS event ID to be awakened in Tstorm by PRS */
1447         rfs_cm_hdr_event_id |=  T_ETH_PACKET_MATCH_RFS_EVENTID << PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1448         rfs_cm_hdr_event_id |=  PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1449         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1450     
1451         /* Configure Registers for RFS mode */
1452
1453         /* Enable gft search */
1454         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
1455
1456         /* On a match, do not load the full context; load only the CID in PRS. */
1457         ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1458       
1459         /* The CAM line is now valid */
1460         cam_line.cam_line_mapped.camline = 0;
1461         SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_VALID, 1);
1462
1463         /* Filters are per PF */
1464         SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
1465         SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
1466
1467         if (!(tcp && udp)) {
1468                 SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
1469                 if (tcp)
1470                         SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL);
1471                 else
1472                         SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL);
1473         }
1474
1475         if (!(ipv4 && ipv6)) {
1476                 SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
1477                 if (ipv4)
1478                         SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4);
1479                 else
1480                         SET_FIELD(cam_line.cam_line_mapped.camline, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6);
1481         }
1482
1483         /* Write characteristics to cam */
1484         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id, cam_line.cam_line_mapped.camline);
1485         cam_line.cam_line_mapped.camline = ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE*pf_id);
1486
1487         /* Write line to RAM - compare to filter 4 tuple */
1488         ram_line.lo = 0;
1489         ram_line.hi = 0;
1490         SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
1491         SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
1492         SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1493         SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
1494         SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
1495         SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
1496
1497
1498         /* Write the RAM line to the register, one 32-bit word per iteration */
1499         for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
1500                 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + i*REG_SIZE, *(ram_line_ptr + i));
1501
1502         /* Set default profile so that no filter match will happen */
1503         ram_line.lo = 0xffffffff;
1504         ram_line.hi = 0x3ff;
1505
1506         for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
1507                 ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*PRS_GFT_CAM_LINES_NO_MATCH + i*REG_SIZE, *(ram_line_ptr + i));
1508 }
1509
1510
1511 #endif /* UNUSED_HSI_FUNC */
1512
1513 /* Configure VF zone size mode*/
1514 void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 mode, bool runtime_init)
1515 {
1516         u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
1517         u32 msdm_vf_offset_mask; 
1518
1519         if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
1520                 msdm_vf_size_log += 1; 
1521         else if (mode == VF_ZONE_SIZE_MODE_QUAD)
1522                 msdm_vf_size_log += 2; 
1523
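        /* The VF zone spans 2^msdm_vf_size_log bytes; the mask selects the
         * offset within a single zone.
         */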
1524         msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
1525
1526         if (runtime_init) {
1527                 STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET, msdm_vf_size_log);
1528                 STORE_RT_REG(p_hwfn, PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET, msdm_vf_offset_mask);
1529         } else {
1531                 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
1532                 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
1533         }
1534 }
1535
1536 /* Get mstorm statistics for offset by VF zone size mode */
1537 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn, u16 stat_cnt_id, u16 vf_zone_size_mode)
1538 {
1539         u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1540
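        /* For non-default zone sizes, each VF zone (stat IDs above MAX_NUM_PFS)
         * grows by 1 (double) or 3 (quad) extra default-size zones, so shift
         * the offset accordingly.
         */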
1541         if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) && (stat_cnt_id > MAX_NUM_PFS)) {
1542                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1543                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
1544                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1545                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * (stat_cnt_id - MAX_NUM_PFS);
1546         }
1547
1548         return offset;
1549 }
1550
1551 /* Get mstorm VF producer offset by VF zone size mode */
1552 u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8 vf_queue_id, u16 vf_zone_size_mode)
1553 {
1554         u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
1555
1556         if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
1557                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1558                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
1559                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1560                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) * vf_id;
1561         }
1562
1563         return offset;
1564 }
1565
1566 #define CRC8_INIT_VALUE 0xFF
1567 static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
1568
1569 /* Calculate and return CDU validation byte per connection type/region/cid */
1570 static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
1571 {
1572         const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
1573
1574         static u8 crc8_table_valid;     /* automatically initialized to 0 */
1575         u8 crc, validation_byte = 0;
1576         u32 validation_string = 0;
1577         u32 data_to_crc;
1578
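        /* Lazily populate the CRC8 table (polynomial 0x07) on first use */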
1579         if (crc8_table_valid == 0) {
1580                 OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
1581                 crc8_table_valid = 1;
1582         }
1583
1584         /* The CRC is calculated on the String-to-compress:
1585          * [31:8]  = {CID[31:20],CID[11:0]}
1586          * [7:4]   = Region
1587          * [3:0]   = Type
1588          */
1589         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
1590                 validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
1591
1592         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
1593                 validation_string |= ((region & 0xF) << 4);
1594
1595         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
1596                 validation_string |= (conn_type & 0xF);
1597
1598         /* Convert to big-endian and calculate CRC8 */
1599         data_to_crc = OSAL_BE32_TO_CPU(validation_string);
1600
1601         crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
1602
1603         /* The validation byte [7:0] is composed:
1604          * for type A validation
1605          * [7]          = active configuration bit
1606          * [6:0]        = crc[6:0]
1607          *
1608          * for type B validation
1609          * [7]          = active configuration bit
1610          * [6:3]        = connection_type[3:0]
1611          * [2:0]        = crc[2:0]
1612          */
1613         validation_byte |= ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
1614
1615         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
1616                 validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
1617         else
1618                 validation_byte |= crc & 0x7F;
1619
1620         return validation_byte;
1621 }
1622
1623 /* Calculate and set validation bytes for session context */
1624 void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
1625                                        u8 ctx_type, u32 cid)
1626 {
1627         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1628
1629         p_ctx = (u8* const)p_ctx_mem;
1630         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1631         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1632         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1633
1634         OSAL_MEMSET(p_ctx, 0, ctx_size);
1635
1636         *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
1637         *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
1638         *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
1639 }
1640
1641 /* Calculate and set validation bytes for task context */
1642 void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
1643                                     u32 tid)
1644 {
1645         u8 *p_ctx, *region1_val_ptr;
1646
1647         p_ctx = (u8* const)p_ctx_mem;
1648         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1649
1650         OSAL_MEMSET(p_ctx, 0, ctx_size);
1651
1652         *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
1653 }
1654
1655 /* Memset session context to 0 while preserving validation bytes */
1656 void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1657 {
1658         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1659         u8 x_val, t_val, u_val;
1660
1661         p_ctx = (u8* const)p_ctx_mem;
1662         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1663         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1664         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1665
1666         x_val = *x_val_ptr;
1667         t_val = *t_val_ptr;
1668         u_val = *u_val_ptr;
1669
1670         OSAL_MEMSET(p_ctx, 0, ctx_size);
1671
1672         *x_val_ptr = x_val;
1673         *t_val_ptr = t_val;
1674         *u_val_ptr = u_val;
1675 }
1676
1677 /* Memset task context to 0 while preserving validation bytes */
1678 void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1679 {
1680         u8 *p_ctx, *region1_val_ptr;
1681         u8 region1_val;
1682
1683         p_ctx = (u8* const)p_ctx_mem;
1684         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1685
1686         region1_val = *region1_val_ptr;
1687
1688         OSAL_MEMSET(p_ctx, 0, ctx_size);
1689
1690         *region1_val_ptr = region1_val;
1691 }
1692
1693 /* Enable and configure context validation */
1694 void ecore_enable_context_validation(struct ecore_hwfn * p_hwfn, struct ecore_ptt *p_ptt)
1695 {
1696         u32 ctx_validation;
1697
1698         /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
1699         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
1700         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
1701
1702         /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
1703         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1704         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
1705
1706         /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
1707         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1708         ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
1709 }