/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "common_hsi.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_eth.h"
#include "tcp_common.h"
#include "ecore_hsi_iscsi.h"
#include "ecore_hsi_fcoe.h"
#include "ecore_hsi_roce.h"
#include "ecore_hsi_iwarp.h"
#include "ecore_rt_defs.h"
#include "ecore_status.h"
#include "ecore_init_ops.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_rdma.h"
#include "ecore_mcp.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4
#define NUM_TASK_VF_SEGMENTS	1

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT	4
#define DQ_RANGE_ALIGN	(1 << DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256

/* Timers constants */
#define TM_SHIFT	7
#define TM_ALIGN	(1 << TM_SHIFT)
#define TM_ELEM_SIZE	4

/* ILT constants */
#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_##cli##_##reg##_RT_OFFSET

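/* Illustrative sketch (not compiled into the driver): ILT_PAGE_IN_BYTES()
 * converts the hardware page-size encoding to bytes, so an encoding of 4
 * yields 1U << (4 + 12) = 64K - matching the "default ILT page size for all
 * clients is 64K" noted in ecore_cxt_mngr_alloc() below.
 */
#if 0
static u32 example_ilt_page_bytes(void)
{
	/* encoding 4 -> 65536 bytes (64K) */
	return ILT_PAGE_IN_BYTES(4);
}
#endif
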
/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4

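/* Illustrative sketch: an ILT entry spans two 4-byte RT registers and packs
 * a 4K-aligned physical address into bits 0..51 with a valid bit at bit 52,
 * mirroring what ecore_ilt_init_pf() programs below (SET_FIELD() comes from
 * the HSI headers included above).
 */
#if 0
static u64 example_ilt_entry(u64 phys_addr)
{
	u64 ilt_hw_entry = 0;

	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, phys_addr >> 12);

	return ilt_hw_entry;
}
#endif
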
/* connection context union */
union conn_context {
	struct e4_core_conn_context core_ctx;
	struct e4_eth_conn_context eth_ctx;
	struct e4_iscsi_conn_context iscsi_ctx;
	struct e4_fcoe_conn_context fcoe_ctx;
	struct e4_roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
	struct e4_iscsi_task_context iscsi_ctx;
	struct e4_fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
	struct e4_rdma_task_context roce_ctx;
};

#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

/* PF per-protocol configuration object */
#define TASK_SEGMENTS	(NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF	(NUM_TASK_PF_SEGMENTS)

struct ecore_tid_seg {
	u32 count;
	u8 type;
	bool has_fl_mem;
};

struct ecore_conn_type_cfg {
	u32 cid_count;
	u32 cids_per_vf;
	struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT Client configuration,
 * Per connection type (protocol) resources (cids, tis, vf cids etc.)
 * 1 - for connection context (CDUC) and for each task context we need two
 * values, for regular task context and for force load memory
 */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK		(0)
#define SRQ_BLK			(0)
#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_##X##_SEGMENTS)

struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct ecore_ilt_cli_blk {
	u32 total_size; /* 0 means not active */
	u32 real_size_in_page;
	u32 start_line;
	u32 dynamic_line_cnt;
};

struct ecore_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;

	/* ILT client blocks for VFs */
	struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
	u32 vf_total_lines;
};

/* Per Path -
 * ILT shadow table
 * Protocol acquired CID lists
 * PF start line in ILT
 */
struct ecore_dma_mem {
	void *p_virt;
	dma_addr_t p_phys;
	u32 size;
};

#define MAP_WORD_SIZE		sizeof(unsigned long)
#define BITS_PER_MAP_WORD	(MAP_WORD_SIZE * 8)

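/* Illustrative sketch: CID bitmaps are sized in whole map words. On a 64-bit
 * platform, 100 CIDs need DIV_ROUND_UP(100, 64) = 2 words, i.e. 16 bytes -
 * the same arithmetic ecore_cid_map_alloc_single() uses below.
 */
#if 0
static u32 example_cid_map_bytes(u32 cid_count)
{
	return MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
}
#endif
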
struct ecore_cid_acquired_map {
	u32 start_cid;
	u32 max_count;
	unsigned long *cid_map;
};

struct ecore_cxt_mngr {
	/* Per protocol configuration */
	struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];

	/* Task type sizes */
	u32 task_type_size[NUM_TASK_TYPES];

	/* total number of VFs for this hwfn -
	 * ALL VFs are symmetric in terms of HW resources
	 */
	u32 vf_count;

	/* Acquired CIDs */
	struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
	/* TBD - do we want this allocated to reserve space? */
	struct ecore_cid_acquired_map acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];

	/* ILT shadow table */
	struct ecore_dma_mem *ilt_shadow;
	u32 pf_start_line;

	/* Mutex for a dynamic ILT allocation */
	osal_mutex_t mutex;

	/* SRC T2 */
	struct ecore_dma_mem *t2;
	u32 t2_num_pages;
	u64 first_free;
	u64 last_free;

	/* The infrastructure originally was very generic and context/task
	 * oriented - per connection-type we would set how many of those
	 * are needed, and later when determining how much memory we're
	 * needing for a given block we'd iterate over all the relevant
	 * connection-types.
	 * But since then we've had some additional resources, some of which
	 * require memory which is independent of the general context/task
	 * scheme. We add those here explicitly per-feature.
	 */

	/* total number of SRQ's for this hwfn */
	u32 srq_count;
	u32 xrc_srq_count;

	/* Maximal number of L2 steering filters */
	u32 arfs_count;

	/* TODO - VF arfs filters ? */
};

/* check if resources/configuration is required according to protocol type */
static bool src_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_cid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_ISCSI ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_ROCE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_tid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct ecore_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
			       struct ecore_cdu_iids *iids)
{
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}
}

/* counts the iids for the Searcher block configuration */
struct ecore_src_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
			       struct ecore_src_iids *iids)
{
	u32 i;

	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (!src_proto(i))
			continue;

		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
	}

	/* Add L2 filtering filters in addition */
	iids->pf_cids += p_mngr->arfs_count;
}

/* counts the iids for the Timers block configuration */
struct ecore_tm_iids {
	u32 pf_cids;
	u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
	u32 pf_tids_total;
	u32 per_vf_cids;
	u32 per_vf_tids;
};

static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
			      struct ecore_tm_iids *iids)
{
	bool tm_vf_required = false;
	bool tm_required = false;
	int i, j;

	/* Timers is a special case -> we don't count how many cids require
	 * timers but what's the max cid that will be used by the timer block.
	 * Therefore we traverse in reverse order, and once we hit a protocol
	 * that requires the timers memory, we'll sum all the protocols up
	 * to that one.
	 */
	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
		struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

		if (tm_cid_proto(i) || tm_required) {
			if (p_cfg->cid_count)
				tm_required = true;

			iids->pf_cids += p_cfg->cid_count;
		}

		if (tm_cid_proto(i) || tm_vf_required) {
			if (p_cfg->cids_per_vf)
				tm_vf_required = true;

			iids->per_vf_cids += p_cfg->cids_per_vf;
		}

		if (tm_tid_proto(i)) {
			struct ecore_tid_seg *segs = p_cfg->tid_seg;

			/* for each segment there is at most one
			 * protocol for which count is not 0.
			 */
			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
				iids->pf_tids[j] += segs[j].count;

			/* The last array element is for the VFs. As for PF
			 * segments there can be only one protocol for
			 * which this value is not 0.
			 */
			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
		}
	}

	iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
	iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
	iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);

	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
		iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
		iids->pf_tids_total += iids->pf_tids[j];
	}
}

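/* Illustrative sketch: every timer-block count above is rounded up to
 * TM_ALIGN. Assuming TM_SHIFT is 7 (TM_ALIGN = 128), e.g. 300 PF cids become
 * ROUNDUP(300, 128) = 384 timer elements.
 */
#if 0
static u32 example_tm_round(u32 pf_cids)
{
	/* e.g. 300 -> 384 with TM_ALIGN == 128 */
	return ROUNDUP(pf_cids, TM_ALIGN);
}
#endif
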
static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
			      struct ecore_qm_iids *iids)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_tid_seg *segs;
	u32 vf_cids = 0, type, j;
	u32 vf_tids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

		segs = p_mngr->conn_cfg[type].tid_seg;
		/* for each segment there is at most one
		 * protocol for which count is not 0.
		 */
		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
			iids->tids += segs[j].count;

		/* The last array element is for the VFs. As for PF
		 * segments there can be only one protocol for
		 * which this value is not 0.
		 */
		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
	}

	iids->vf_cids += vf_cids * p_mngr->vf_count;
	iids->tids += vf_tids * p_mngr->vf_count;

	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
						    u32 seg)
{
	struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
	u32 i;

	/* Find the protocol with tid count > 0 for this segment.
	 * Note: there can only be one and this is already validated.
	 */
	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
			return &p_cfg->conn_cfg[i].tid_seg[seg];
	}

	return OSAL_NULL;
}

static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn,
				    u32 num_srqs, u32 num_xrc_srqs)
{
	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
	p_mgr->xrc_srq_count = num_xrc_srqs;
}

u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->p_cxt_mngr->srq_count;
}

u32 ecore_cxt_get_xrc_srq_count(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->p_cxt_mngr->xrc_srq_count;
}

u32 ecore_cxt_get_ilt_page_size(struct ecore_hwfn *p_hwfn,
				enum ilt_clients ilt_client)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];

	return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}

static u32 ecore_cxt_srqs_per_page(struct ecore_hwfn *p_hwfn)
{
	u32 page_size;

	page_size = ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
	return page_size / SRQ_CXT_SIZE;
}

u32 ecore_cxt_get_total_srq_count(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	u32 total_srqs;

	total_srqs = p_mgr->srq_count;

	/* XRC SRQs use the first and only the first SRQ ILT page. So if XRC
	 * SRQs are requested we need to allocate an extra SRQ ILT page for
	 * them. For that we increase the number of regular SRQs to cause the
	 * allocation of that extra page.
	 */
	if (p_mgr->xrc_srq_count)
		total_srqs += ecore_cxt_srqs_per_page(p_hwfn);

	return total_srqs;
}

/* set the iids (cid/tid) count per protocol */
static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
					  enum protocol_type type,
					  u32 cid_count, u32 vf_cid_cnt)
{
	struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);

	if (type == PROTOCOLID_ROCE) {
		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
		u32 align = elems_per_page * DQ_RANGE_ALIGN;

		p_conn->cid_count = ROUNDUP(p_conn->cid_count, align);
	}
}

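/* Illustrative sketch of the extra RoCE alignment above: with a 64K ILT page
 * and a hypothetical 512-byte connection context, elems_per_page is 128, so
 * the RoCE cid count is rounded up to multiples of
 * 128 * DQ_RANGE_ALIGN = 2048, i.e. whole CDUC pages (see also
 * ecore_ilt_get_dynamic_line_cnt() below).
 */
#if 0
static u32 example_roce_cid_align(u32 cid_count)
{
	u32 elems_per_page = (64 * 1024) / 512;	/* hypothetical sizes */
	u32 align = elems_per_page * DQ_RANGE_ALIGN;

	return ROUNDUP(cid_count, align);
}
#endif
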
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
				  enum protocol_type type,
				  u32 *vf_cid)
{
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
				  enum protocol_type type)
{
	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
				  enum protocol_type type)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < TASK_SEGMENTS; i++)
		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

	return cnt;
}

static void ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
					  enum protocol_type proto,
					  u8 seg,
					  u8 seg_type,
					  u32 count,
					  bool has_fl)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

	p_seg->count = count;
	p_seg->has_fl_mem = has_fl;
	p_seg->type = seg_type;
}

/* the *p_line parameter must be either 0 for the first invocation or the
 * value returned in the previous invocation.
 */
static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
				   struct ecore_ilt_cli_blk *p_blk,
				   u32 start_line,
				   u32 total_size,
				   u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}

static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
				   struct ecore_ilt_client_cfg *p_cli,
				   struct ecore_ilt_cli_blk *p_blk,
				   u32 *p_line,
				   enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val, p_cli->last.val,
		   p_blk->total_size, p_blk->real_size_in_page,
		   p_blk->start_line);
}

static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
					  enum ilt_clients ilt_client)
{
	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
	struct ecore_ilt_client_cfg *p_cli;
	u32 lines_to_skip = 0;
	u32 cxts_per_p;

	/* TBD MK: ILT code should be simplified once PROTO enum is changed */

	if (ilt_client == ILT_CLI_CDUC) {
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
			     (u32)CONN_CXT_SIZE(p_hwfn);

		lines_to_skip = cid_count / cxts_per_p;
	}

	return lines_to_skip;
}

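/* Illustrative sketch: with a 64K CDUC page and a hypothetical 512-byte
 * connection context, cxts_per_p is 128, so e.g. 10000 RoCE cids leave the
 * first 10000 / 128 = 78 ILT lines to dynamic allocation.
 */
#if 0
static u32 example_dynamic_lines(u32 cid_count)
{
	u32 cxts_per_p = (64 * 1024) / 512;	/* hypothetical sizes */

	return cid_count / cxts_per_p;
}
#endif
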
static struct ecore_ilt_client_cfg *
ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli)
{
	p_cli->active = false;
	p_cli->first.val = 0;
	p_cli->last.val = 0;
	return p_cli;
}

static struct ecore_ilt_cli_blk *
ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)
{
	p_blk->total_size = 0;
	return p_blk;
}

enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
					       u32 *line_count)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 curr_line, total, i, task_size, line;
	struct ecore_ilt_client_cfg *p_cli;
	struct ecore_ilt_cli_blk *p_blk;
	struct ecore_cdu_iids cdu_iids;
	struct ecore_src_iids src_iids;
	struct ecore_qm_iids qm_iids;
	struct ecore_tm_iids tm_iids;
	struct ecore_tid_seg *p_seg;

	OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
	OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
	OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);

	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

	curr_line = p_mngr->pf_start_line;

	/* CDUC PF */
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC, CDUT and QM clients */
	ecore_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			       total, CONN_CXT_SIZE(p_hwfn));

	ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
								 ILT_CLI_CDUC);

	/* CDUC VF */
	p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			       total, CONN_CXT_SIZE(p_hwfn));

	ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;

	for (i = 1; i < p_mngr->vf_count; i++)
		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_CDUC);

	/* CDUT PF */
	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
	p_cli->first.val = curr_line;

	/* first the 'working' task memory */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
				       p_mngr->task_type_size[p_seg->type]);

		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_CDUT);
	}

	/* next the 'init' task memory (forced load memory) */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = ecore_cxt_set_blk(
				&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

		if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1. The total-size in the corresponding FL block of
			 *    the ILT client is set to 0 - no ILT lines are
			 *    provisioned and no ILT memory allocated.
			 *
			 * 2. The start-line of said block is set to the
			 *    start line of the matching working memory
			 *    block in the ILT client. This is later used to
			 *    configure the CDU segment offset registers, so
			 *    an FL command for TIDs of this segment behaves
			 *    as a regular load command (loading TIDs from
			 *    the working memory).
			 */
			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

			ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
			continue;
		}
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		ecore_ilt_cli_blk_fill(p_cli, p_blk,
				       curr_line, total,
				       p_mngr->task_type_size[p_seg->type]);

		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_CDUT);
	}
	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

	/* CDUT VF */
	p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
	if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */

		/* 'working' memory */
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
		ecore_ilt_cli_blk_fill(p_cli, p_blk,
				       curr_line, total,
				       p_mngr->task_type_size[p_seg->type]);

		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_CDUT);

		/* 'init' memory */
		p_blk = ecore_cxt_set_blk(
				&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
		if (!p_seg->has_fl_mem) {
			/* see comment above */
			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
			ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
		} else {
			task_size = p_mngr->task_type_size[p_seg->type];
			ecore_ilt_cli_blk_fill(p_cli, p_blk,
					       curr_line, total, task_size);
		}
		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_CDUT);
		p_cli->vf_total_lines = curr_line -
					p_cli->vf_blks[0].start_line;

		/* Now for the rest of the VFs */
		for (i = 1; i < p_mngr->vf_count; i++) {
			/* don't set p_blk i.e. don't clear total_size */
			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk,
					       &curr_line, ILT_CLI_CDUT);

			/* don't set p_blk i.e. don't clear total_size */
			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk,
					       &curr_line, ILT_CLI_CDUT);
		}
	}

	/* QM */
	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
	p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);

	ecore_cxt_qm_iids(p_hwfn, &qm_iids);
	total = ecore_qm_pf_mem_size(qm_iids.cids,
				     qm_iids.vf_cids, qm_iids.tids,
				     p_hwfn->qm_info.num_pqs,
				     p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
			       QM_PQ_ELEMENT_SIZE);

	ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* SRC */
	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
	ecore_cxt_src_iids(p_mngr, &src_iids);

	/* Both the PF and VFs searcher connections are stored in the per PF
	 * database. Thus sum the PF searcher cids and all the VFs searcher
	 * cids.
	 */
	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (total) {
		u32 local_max = OSAL_MAX_T(u32, total,
					   SRC_MIN_NUM_ELEMS);

		total = OSAL_ROUNDUP_POW_OF_TWO(local_max);

		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				       total * sizeof(struct src_ent),
				       sizeof(struct src_ent));

		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_SRC);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM PF */
	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
	ecore_cxt_tm_iids(p_mngr, &tm_iids);
	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
	if (total) {
		p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				       total * TM_ELEM_SIZE,
				       TM_ELEM_SIZE);

		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_TM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM VF */
	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
	if (total) {
		p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);
		ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				       total * TM_ELEM_SIZE,
				       TM_ELEM_SIZE);

		ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				       ILT_CLI_TM);

		p_cli->vf_total_lines = curr_line - p_blk->start_line;
		for (i = 1; i < p_mngr->vf_count; i++) {
			ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk,
					       &curr_line, ILT_CLI_TM);
		}
	}

	/* TSDM (SRQ CONTEXT) */
	total = ecore_cxt_get_total_srq_count(p_hwfn);

	p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
	p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
	ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			       total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

	ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
			       ILT_CLI_TSDM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, ECORE_ILT)) {
		return ECORE_NORESOURCES;
	}

	return ECORE_SUCCESS;
}

u32 ecore_cxt_cfg_ilt_compute_excess(struct ecore_hwfn *p_hwfn, u32 used_lines)
{
	struct ecore_ilt_client_cfg *p_cli;
	u32 excess_lines, available_lines;
	struct ecore_cxt_mngr *p_mngr;
	u32 ilt_page_size, elem_size;
	struct ecore_tid_seg *p_seg;
	int i;

	available_lines = RESC_NUM(p_hwfn, ECORE_ILT);
	excess_lines = used_lines - available_lines;

	if (!excess_lines)
		return 0;

	if (!ECORE_IS_RDMA_PERSONALITY(p_hwfn))
		return 0;

	p_mngr = p_hwfn->p_cxt_mngr;
	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		elem_size = p_mngr->task_type_size[p_seg->type];
		if (!elem_size)
			continue;

		return (ilt_page_size / elem_size) * excess_lines;
	}

	DP_ERR(p_hwfn, "failed computing excess ILT lines\n");
	return 0;
}

static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 i;

	if (!p_mngr->t2)
		return;

	for (i = 0; i < p_mngr->t2_num_pages; i++)
		if (p_mngr->t2[i].p_virt)
			OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
					       p_mngr->t2[i].p_virt,
					       p_mngr->t2[i].p_phys,
					       p_mngr->t2[i].size);

	OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
	p_mngr->t2 = OSAL_NULL;
}

static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_num, total_size, ent_per_page, psz, i;
	struct ecore_ilt_client_cfg *p_src;
	struct ecore_src_iids src_iids;
	struct ecore_dma_mem *p_t2;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));

	/* if the SRC ILT client is inactive - there are no connections
	 * requiring the searcher, leave.
	 */
	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
	if (!p_src->active)
		return ECORE_SUCCESS;

	ecore_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	total_size = conn_num * sizeof(struct src_ent);

	/* use the same page size as the SRC ILT client */
	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
	p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				 p_mngr->t2_num_pages *
				 sizeof(struct ecore_dma_mem));
	if (!p_mngr->t2) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
		rc = ECORE_NOMEM;
		goto t2_fail;
	}

	/* allocate t2 pages */
	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 size = OSAL_MIN_T(u32, total_size, psz);
		void **p_virt = &p_mngr->t2[i].p_virt;

		*p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						  &p_mngr->t2[i].p_phys,
						  size);
		if (!p_mngr->t2[i].p_virt) {
			rc = ECORE_NOMEM;
			goto t2_fail;
		}
		OSAL_MEM_ZERO(*p_virt, size);
		p_mngr->t2[i].size = size;
		total_size -= size;
	}

	/* Set the t2 pointers */

	/* entries per page - must be a power of two */
	ent_per_page = psz / sizeof(struct src_ent);

	p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;

	p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
	p_mngr->last_free = (u64)p_t2->p_phys +
			    ((conn_num - 1) & (ent_per_page - 1)) *
			    sizeof(struct src_ent);

	for (i = 0; i < p_mngr->t2_num_pages; i++) {
		u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
		struct src_ent *entries = p_mngr->t2[i].p_virt;
		u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
		u32 j;

		for (j = 0; j < ent_num - 1; j++) {
			val = p_ent_phys +
			      (j + 1) * sizeof(struct src_ent);
			entries[j].next = OSAL_CPU_TO_BE64(val);
		}

		if (i < p_mngr->t2_num_pages - 1)
			val = (u64)p_mngr->t2[i + 1].p_phys;
		else
			val = 0;
		entries[j].next = OSAL_CPU_TO_BE64(val);

		conn_num -= ent_num;
	}

	return ECORE_SUCCESS;

t2_fail:
	ecore_cxt_src_t2_free(p_hwfn);
	return rc;
}

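/* Illustrative sketch of the t2 layout built above: with a 64K SRC page and
 * assuming sizeof(struct src_ent) is 64 bytes, each t2 page holds 1024
 * entries; entry j chains to entry j + 1, and the last entry of each page
 * chains to the first entry of the next page, forming one physically-linked
 * free list between first_free and last_free.
 */
#if 0
static u32 example_t2_ents_per_page(void)
{
	return (64 * 1024) / 64;	/* 1024 entries, a power of two */
}
#endif
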
#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < ILT_CLI_MAX; pos++)		\
		if (!clients[pos].active) {		\
			continue;			\
		} else					\

/* Total number of ILT lines used by this PF */
static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients)
		size += (ilt_clients[i].last.val -
			 ilt_clients[i].first.val + 1);

	return size;
}

static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	if (p_mngr->ilt_shadow == OSAL_NULL)
		return;

	ilt_size = ecore_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->p_virt)
			OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
					       p_dma->p_virt,
					       p_dma->p_phys,
					       p_dma->size);
		p_dma->p_virt = OSAL_NULL;
	}
	OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
	p_mngr->ilt_shadow = OSAL_NULL;
}

static enum _ecore_status_t ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
						struct ecore_ilt_cli_blk *p_blk,
						enum ilt_clients ilt_client,
						u32 start_line_offset)
{
	struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left, lines_to_skip = 0;

	/* Special handling for RoCE that supports dynamic allocation */
	if (ECORE_IS_RDMA_PERSONALITY(p_hwfn) &&
	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
		return ECORE_SUCCESS;

	lines_to_skip = p_blk->dynamic_line_cnt;

	if (!p_blk->total_size)
		return ECORE_SUCCESS;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) -
		lines_to_skip;
	line = p_blk->start_line + start_line_offset -
	       p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
		p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						 &p_phys, size);
		if (!p_virt)
			return ECORE_NOMEM;
		OSAL_MEM_ZERO(p_virt, size);

		ilt_shadow[line].p_phys = p_phys;
		ilt_shadow[line].p_virt = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			   line, (unsigned long long)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_ilt_client_cfg *clients = p_mngr->clients;
	struct ecore_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	enum _ecore_status_t rc;

	size = ecore_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					 size * sizeof(struct ecore_dma_mem));

	if (p_mngr->ilt_shadow == OSAL_NULL) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
		rc = ECORE_NOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct ecore_dma_mem)));

	for_each_ilt_valid_client(i, clients) {
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc != ECORE_SUCCESS)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
							 i, lines);
				if (rc != ECORE_SUCCESS)
					goto ilt_shadow_fail;
			}
		}
	}

	return ECORE_SUCCESS;

ilt_shadow_fail:
	ecore_ilt_shadow_free(p_hwfn);
	return rc;
}

static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].cid_map = OSAL_NULL;
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;

		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
			OSAL_FREE(p_hwfn->p_dev,
				  p_mngr->acquired_vf[type][vf].cid_map);
			p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
			p_mngr->acquired_vf[type][vf].max_count = 0;
			p_mngr->acquired_vf[type][vf].start_cid = 0;
		}
	}
}

static enum _ecore_status_t
ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
			   u32 cid_start, u32 cid_count,
			   struct ecore_cid_acquired_map *p_map)
{
	u32 size;

	if (!cid_count)
		return ECORE_SUCCESS;

	size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
	p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (p_map->cid_map == OSAL_NULL)
		return ECORE_NOMEM;

	p_map->max_count = cid_count;
	p_map->start_cid = cid_start;

	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
		   "Type %08x start: %08x count %08x\n",
		   type, p_map->start_cid, p_map->max_count);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0, vf_start_cid = 0;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
		struct ecore_cid_acquired_map *p_map;

		/* Handle PF maps */
		p_map = &p_mngr->acquired[type];
		if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
					       p_cfg->cid_count, p_map))
			goto cid_map_fail;

		/* Handle VF maps */
		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			if (ecore_cid_map_alloc_single(p_hwfn, type,
						       vf_start_cid,
						       p_cfg->cids_per_vf,
						       p_map))
				goto cid_map_fail;
		}

		start_cid += p_cfg->cid_count;
		vf_start_cid += p_cfg->cids_per_vf;
	}

	return ECORE_SUCCESS;

cid_map_fail:
	ecore_cid_map_free(p_hwfn);
	return ECORE_NOMEM;
}

enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ilt_client_cfg *clients;
	struct ecore_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
	if (!p_mngr) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
		return ECORE_NOMEM;
	}

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);

	/* default ILT page size for all clients is 64K */
	for (i = 0; i < ILT_CLI_MAX; i++)
		p_mngr->clients[i].p_size.val = p_hwfn->p_dev->ilt_page_size;

	/* Initialize task sizes */
	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

	if (p_hwfn->p_dev->p_iov_info)
		p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;

	/* Initialize the dynamic ILT allocation mutex */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
#endif
	OSAL_MUTEX_INIT(&p_mngr->mutex);

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc;

	/* Allocate the ILT shadow table */
	rc = ecore_ilt_shadow_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
		goto tables_alloc_fail;
	}

	/* Allocate the T2 table */
	rc = ecore_cxt_src_t2_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
		goto tables_alloc_fail;
	}

	/* Allocate and initialize the acquired cids bitmaps */
	rc = ecore_cid_map_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
		goto tables_alloc_fail;
	}

	return ECORE_SUCCESS;

tables_alloc_fail:
	ecore_cxt_mngr_free(p_hwfn);
	return rc;
}

void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	ecore_cid_map_free(p_hwfn);
	ecore_cxt_src_t2_free(p_hwfn);
	ecore_ilt_shadow_free(p_hwfn);
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
#endif
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = OSAL_NULL;
}

void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_cid_acquired_map *p_map;
	struct ecore_conn_type_cfg *p_cfg;
	int type;
	u32 len;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 vf;

		p_cfg = &p_mngr->conn_cfg[type];
		if (p_cfg->cid_count) {
			p_map = &p_mngr->acquired[type];
			len = DIV_ROUND_UP(p_map->max_count,
					   BITS_PER_MAP_WORD) *
			      MAP_WORD_SIZE;
			OSAL_MEM_ZERO(p_map->cid_map, len);
		}

		if (!p_cfg->cids_per_vf)
			continue;

		for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			len = DIV_ROUND_UP(p_map->max_count,
					   BITS_PER_MAP_WORD) *
			      MAP_WORD_SIZE;
			OSAL_MEM_ZERO(p_map->cid_map, len);
		}
	}
}

/* HW initialization helper (per Block, per phase) */

#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
	 CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
	 CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE1_NCIB_SHIFT)

static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, (u32)CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-0 tasks configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, (u32)CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-1 tasks configuration */
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, (u32)CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}

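/* Illustrative sketch of the block-waste math above: with a 64K page and a
 * hypothetical 320-byte connection context, elems_per_page is
 * 65536 / 320 = 204 and block_waste is 65536 - 204 * 320 = 256 bytes left
 * unused at the end of each page.
 */
#if 0
static u32 example_block_waste(u32 page_bytes, u32 cxt_size)
{
	u32 elems_per_page = page_bytes / cxt_size;

	return page_bytes - elems_per_page * cxt_size;
}
#endif
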
#define CDU_SEG_REG_TYPE_SHIFT		CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
#define CDU_SEG_REG_TYPE_MASK		0x1
#define CDU_SEG_REG_OFFSET_SHIFT	0
#define CDU_SEG_REG_OFFSET_MASK		CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK

static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ilt_client_cfg *p_cli;
	struct ecore_tid_seg *p_seg;
	u32 cdu_seg_params, offset;
	int i;

	static const u32 rt_type_offset_arr[] = {
		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	static const u32 rt_type_offset_fl_arr[] = {
		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];

	/* There are initializations only for CDUT during pf Phase */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg)
			continue;

		/* Note: start_line is already adjusted for the CDU
		 * segment register granularity, so we just need to
		 * divide. Adjustment is implicit as we assume ILT
		 * Page size is larger than 32K!
		 */
		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i],
			     cdu_seg_params);

		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i],
			     cdu_seg_params);
	}
}

void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      bool is_pf_loading)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	struct ecore_mcp_link_state *p_link;
	struct ecore_qm_iids iids;

	OSAL_MEM_ZERO(&iids, sizeof(iids));
	ecore_cxt_qm_iids(p_hwfn, &iids);

	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;

	ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
			    p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
			    is_pf_loading,
			    iids.cids, iids.vf_cids, iids.tids,
			    qm_info->start_pq,
			    qm_info->num_pqs - qm_info->num_vf_pqs,
			    qm_info->num_vf_pqs,
			    qm_info->start_vport,
			    qm_info->num_vports, qm_info->pf_wfq,
			    qm_info->pf_rl, p_link->speed,
			    p_hwfn->qm_info.qm_pq_params,
			    p_hwfn->qm_info.qm_vport_params);
}

static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
		     ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}

static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;

	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);

	/* Connection types 6 & 7 are not in use, yet they must be configured
	 * as the highest possible connection. Not configuring them means the
	 * defaults will be used, and with a large number of cids a bug may
	 * occur, if the defaults are smaller than dq_pf_max_cid /
	 * dq_vf_max_cid.
	 */
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);

	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}

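/* Illustrative sketch: the doorbell-queue registers above hold cumulative
 * cid counts in DQ_RANGE_ALIGN (16) units. E.g. protocol 0 with 4096 cids
 * and protocol 1 with 2048 cids would program ICID_0 = 256 and ICID_1 = 384.
 */
#if 0
static u32 example_dq_icid(u32 cumulative_cids)
{
	return cumulative_cids >> DQ_RANGE_SHIFT;	/* 4096 -> 256 */
}
#endif
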
static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ilt_client_cfg *ilt_clients;
	int i;

	ilt_clients = p_hwfn->p_cxt_mngr->clients;
	for_each_ilt_valid_client(i, ilt_clients) {
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].first.reg,
			     ilt_clients[i].first.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].last.reg,
			     ilt_clients[i].last.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].p_size.reg,
			     ilt_clients[i].p_size.val);
	}
}

static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ilt_client_cfg *p_cli;
	u32 blk_factor;

	/* For simplicity we set the 'block' to be an ILT page */
	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
			     p_iov->first_vf_in_pf);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
			     p_iov->first_vf_in_pf + p_iov->total_vfs);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
	blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
	blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}
}

/* ILT (PSWRQ2) PF */
static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ilt_client_cfg *clients;
	struct ecore_cxt_mngr *p_mngr;
	struct ecore_dma_mem *p_shdw;
	u32 line, rt_offst, i;

	ecore_ilt_bounds_init(p_hwfn);
	ecore_ilt_vf_bounds_init(p_hwfn);

	p_mngr = p_hwfn->p_cxt_mngr;
	p_shdw = p_mngr->ilt_shadow;
	clients = p_hwfn->p_cxt_mngr->clients;

	for_each_ilt_valid_client(i, clients) {
		/* Client's 1st val and RT array are absolute, ILT shadows'
		 * lines are relative.
		 */
		line = clients[i].first.val - p_mngr->pf_start_line;
		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
			   clients[i].first.val * ILT_ENTRY_IN_REGS;

		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
			u64 ilt_hw_entry = 0;

			/* p_virt could be OSAL_NULL in case of dynamic
			 * allocation
			 */
			if (p_shdw[line].p_virt != OSAL_NULL) {
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
					  (unsigned long long)(p_shdw[line].p_phys >> 12));

				DP_VERBOSE(
					p_hwfn, ECORE_MSG_ILT,
					"Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
					rt_offst, line, i,
					(unsigned long long)(p_shdw[line].p_phys >> 12));
			}

			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
		}
	}
}

/* SRC (Searcher) PF */
static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rounded_conn_num, conn_num, conn_max;
	struct ecore_src_iids src_iids;

	OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
	ecore_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (!conn_num)
		return;

	conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS);
	rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max);

	STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
	STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
		     OSAL_LOG2(rounded_conn_num));

	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->first_free);
	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->last_free);
	DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
		   "Configured SEARCHER for 0x%08x connections\n",
		   conn_num);
}

#define TM_CFG_NUM_IDS_SHIFT		0
#define TM_CFG_NUM_IDS_MASK		0xFFFFULL
#define TM_CFG_PRE_SCAN_OFFSET_SHIFT	16
#define TM_CFG_PRE_SCAN_OFFSET_MASK	0x1FFULL
#define TM_CFG_PARENT_PF_SHIFT		25
#define TM_CFG_PARENT_PF_MASK		0x7ULL

#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT	30
#define TM_CFG_CID_PRE_SCAN_ROWS_MASK	0x1FFULL

#define TM_CFG_TID_OFFSET_SHIFT		30
#define TM_CFG_TID_OFFSET_MASK		0x7FFFFULL
#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT	49
#define TM_CFG_TID_PRE_SCAN_ROWS_MASK	0x1FFULL

static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 active_seg_mask = 0, tm_offset, rt_reg;
	struct ecore_tm_iids tm_iids;
	u64 cfg_word;
	u8 i;

	OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
	ecore_cxt_tm_iids(p_mngr, &tm_iids);

	/* @@@TBD No pre-scan for now */

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */

		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	cfg_word = 0;
	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */

	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
		 (sizeof(cfg_word) / sizeof(u32)) *
		 (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);

	/* enable scan */
	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
		     tm_iids.pf_cids ? 0x1 : 0x0);

	/* @@@TBD how to enable the scan for the VFs */

	tm_offset = tm_iids.per_vf_cids;

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	tm_offset = tm_iids.pf_cids;
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (NUM_OF_VFS(p_hwfn->p_dev) +
			  p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
		active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);

		tm_offset += tm_iids.pf_tids[i];
	}

	if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
		active_seg_mask = 0;

	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);

	/* @@@TBD how to enable the scan for the VFs */
}

static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
{
	if ((p_hwfn->hw_info.personality == ECORE_PCI_FCOE) &&
	    p_hwfn->pf_params.fcoe_pf_params.is_target)
		STORE_RT_REG(p_hwfn,
			     PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
}

static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_conn_type_cfg *p_fcoe;
	struct ecore_tid_seg *p_tid;

	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];

	/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
	if (!p_fcoe->cid_count)
		return;

	p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
	if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
				 p_tid->count);
	} else {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
				 p_tid->count);
	}
}

void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
{
	/* CDU configuration */
	ecore_cdu_init_common(p_hwfn);
	ecore_prs_init_common(p_hwfn);
}

void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	ecore_qm_init_pf(p_hwfn, p_ptt, true);
	ecore_cm_init_pf(p_hwfn);
	ecore_dq_init_pf(p_hwfn);
	ecore_cdu_init_pf(p_hwfn);
	ecore_ilt_init_pf(p_hwfn);
	ecore_src_init_pf(p_hwfn);
	ecore_tm_init_pf(p_hwfn);
	ecore_prs_init_pf(p_hwfn);
}

enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
					    enum protocol_type type,
					    u32 *p_cid, u8 vfid)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_cid_acquired_map *p_map;
	u32 rel_cid;

	if (type >= MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
		return ECORE_INVAL;
	}

	if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
		DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
		return ECORE_INVAL;
	}

	/* Determine the right map to take this CID from */
	if (vfid == ECORE_CXT_PF_CID)
		p_map = &p_mngr->acquired[type];
	else
		p_map = &p_mngr->acquired_vf[type][vfid];

	if (p_map->cid_map == OSAL_NULL) {
		DP_NOTICE(p_hwfn, true,
			  "CID map for protocol %d is not allocated", type);
		return ECORE_INVAL;
	}

	rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
					   p_map->max_count);

	if (rel_cid >= p_map->max_count) {
		DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
			  type);
		return ECORE_NORESOURCES;
	}

	OSAL_SET_BIT(rel_cid, p_map->cid_map);

	*p_cid = rel_cid + p_map->start_cid;

	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
		   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
		   *p_cid, rel_cid, vfid, type);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
					   enum protocol_type type,
					   u32 *p_cid)
{
	return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
}
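/* Usage sketch (illustrative only, not a fixed driver flow): a PF-side
 * caller pairs the acquire with a release once the connection is torn down:
 *
 *	u32 cid;
 *
 *	if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid) !=
 *	    ECORE_SUCCESS)
 *		return;		// no free CID left in the ETH range
 *	...
 *	ecore_cxt_release_cid(p_hwfn, cid);
 */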
static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
					u32 cid, u8 vfid,
					enum protocol_type *p_type,
					struct ecore_cid_acquired_map **pp_map)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rel_cid;

	/* Iterate over protocols and find matching cid range */
	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
		if (vfid == ECORE_CXT_PF_CID)
			*pp_map = &p_mngr->acquired[*p_type];
		else
			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];

		if (!((*pp_map)->cid_map))
			continue;

		if (cid >= (*pp_map)->start_cid &&
		    cid < (*pp_map)->start_cid + (*pp_map)->max_count)
			break;
	}

	if (*p_type == MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
		goto fail;
	}

	rel_cid = cid - (*pp_map)->start_cid;
	if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
		DP_NOTICE(p_hwfn, true,
			  "CID %d [vfid %02x] not acquired", cid, vfid);
		goto fail;
	}

	return true;
fail:
	*p_type = MAX_CONN_TYPES;
	*pp_map = OSAL_NULL;
	return false;
}
void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
{
	struct ecore_cid_acquired_map *p_map = OSAL_NULL;
	enum protocol_type type;
	bool b_acquired;
	u32 rel_cid;

	if (vfid != ECORE_CXT_PF_CID && vfid >= COMMON_MAX_NUM_VFS) {
		DP_NOTICE(p_hwfn, true,
			  "Trying to return incorrect CID belonging to VF %02x\n",
			  vfid);
		return;
	}

	/* Test acquired and find matching per-protocol map */
	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
						 &type, &p_map);
	if (!b_acquired)
		return;

	rel_cid = cid - p_map->start_cid;
	OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);

	DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
		   cid, rel_cid, vfid, type);
}
void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
{
	_ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
}
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
					    struct ecore_cxt_info *p_info)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_cid_acquired_map *p_map = OSAL_NULL;
	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
	enum protocol_type type;
	bool b_acquired;

	/* Test acquired and find matching per-protocol map */
	b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
						 ECORE_CXT_PF_CID,
						 &type, &p_map);
	if (!b_acquired)
		return ECORE_INVAL;

	/* set the protocol type */
	p_info->type = type;

	/* compute context virtual pointer */
	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;

	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
	line = p_info->iid / cxts_per_p;

	/* Make sure the context is allocated (dynamic allocation) */
	if (!p_mngr->ilt_shadow[line].p_virt)
		return ECORE_INVAL;

	p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
			(p_info->iid % cxts_per_p) * conn_cxt_size;

	DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
		   line, p_info->p_cxt, p_info->iid);

	return ECORE_SUCCESS;
}
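/* Worked example (hypothetical sizes): with a 4KB CDUC page
 * (hw_p_size == 0, so ILT_PAGE_IN_BYTES == 4096) and a 512-byte aligned
 * connection context, cxts_per_p == 8; iid 21 then resolves to ILT shadow
 * line 21 / 8 == 2 at byte offset (21 % 8) * 512 == 2560 within that page.
 */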
static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
				     struct ecore_rdma_pf_params *p_params,
				     u32 num_tasks)
{
	u32 num_cons, num_qps;
	enum protocol_type proto;

	/* The only case where the RDMA personality can be overridden is if
	 * NVRAM is configured with ETH_RDMA or if no RDMA protocol was
	 * requested.
	 */
	switch (p_params->rdma_protocol) {
	case ECORE_RDMA_PROTOCOL_DEFAULT:
		if (p_hwfn->mcp_info->func_info.protocol ==
		    ECORE_PCI_ETH_RDMA) {
			DP_NOTICE(p_hwfn, false,
				  "Current drivers don't support RoCE & iWARP together. Defaulting to RoCE-only\n");
			p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
		}
		break;
	case ECORE_RDMA_PROTOCOL_NONE:
		p_hwfn->hw_info.personality = ECORE_PCI_ETH;
		return; /* intentional... nothing left to do... */
	case ECORE_RDMA_PROTOCOL_ROCE:
		if (p_hwfn->mcp_info->func_info.protocol == ECORE_PCI_ETH_RDMA)
			p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
		break;
	case ECORE_RDMA_PROTOCOL_IWARP:
		if (p_hwfn->mcp_info->func_info.protocol == ECORE_PCI_ETH_RDMA)
			p_hwfn->hw_info.personality = ECORE_PCI_ETH_IWARP;
		break;
	}

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH_IWARP:
		/* Each QP requires one connection */
		num_cons = OSAL_MIN_T(u32, IWARP_MAX_QPS, p_params->num_qps);
#ifdef CONFIG_ECORE_IWARP /* required for the define */
		/* additional connections required for passive tcp handling */
		num_cons += ECORE_IWARP_PREALLOC_CNT;
#endif
		proto = PROTOCOLID_IWARP;
		break;
	case ECORE_PCI_ETH_ROCE:
		num_qps = OSAL_MIN_T(u32, ROCE_MAX_QPS, p_params->num_qps);
		num_cons = num_qps * 2; /* each QP requires two connections */
		proto = PROTOCOLID_ROCE;
		break;
	default:
		return;
	}

	if (num_cons && num_tasks) {
		u32 num_srqs, num_xrc_srqs, max_xrc_srqs, page_size;

		ecore_cxt_set_proto_cid_count(p_hwfn, proto,
					      num_cons, 0);

		/* Deliberately passing ROCE for the task id. This is because
		 * iWARP / RoCE share the task id.
		 */
		ecore_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
					      ECORE_CXT_ROCE_TID_SEG,
					      1, /* RoCE segment type */
					      num_tasks,
					      false); /* !force load */

		num_srqs = OSAL_MIN_T(u32, ECORE_RDMA_MAX_SRQS,
				      p_params->num_srqs);

		/* XRC SRQs populate a single ILT page */
		page_size = ecore_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
		max_xrc_srqs = page_size / XRC_SRQ_CXT_SIZE;
		max_xrc_srqs = OSAL_MIN_T(u32, max_xrc_srqs,
					  ECORE_RDMA_MAX_XRC_SRQS);

		num_xrc_srqs = OSAL_MIN_T(u32, p_params->num_xrc_srqs,
					  max_xrc_srqs);
		ecore_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
	} else {
		DP_INFO(p_hwfn->p_dev,
			"RDMA personality used without setting params!\n");
	}
}
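/* Sizing example (hypothetical values): if the TSDM ILT page is 4096 bytes
 * and an XRC SRQ context is 64 bytes, a single page holds at most
 * 4096 / 64 == 64 XRC SRQs, and the requested num_xrc_srqs is clamped to
 * min(64, ECORE_RDMA_MAX_XRC_SRQS).
 */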
enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
					     u32 rdma_tasks)
{
	/* Set the number of required CORE connections */
	u32 core_cids = 1; /* SPQ */

	if (p_hwfn->using_ll2)
		core_cids += 4; /* @@@TBD Use the proper #define */

	ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH_RDMA:
	case ECORE_PCI_ETH_IWARP:
	case ECORE_PCI_ETH_ROCE:
	{
		ecore_rdma_set_pf_params(p_hwfn,
					 &p_hwfn->pf_params.rdma_pf_params,
					 rdma_tasks);
		/* no need for break since RoCE coexists with Ethernet */
	}
	case ECORE_PCI_ETH:
	{
		u32 count = 0;
		struct ecore_eth_pf_params *p_params =
			&p_hwfn->pf_params.eth_pf_params;

		if (!p_params->num_vf_cons)
			p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
		ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
					      p_params->num_cons,
					      p_params->num_vf_cons);

		count = p_params->num_arfs_filters;

		if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
				   &p_hwfn->p_dev->mf_bits))
			p_hwfn->p_cxt_mngr->arfs_count = count;

		break;
	}
	case ECORE_PCI_FCOE:
	{
		struct ecore_fcoe_pf_params *p_params;

		p_params = &p_hwfn->pf_params.fcoe_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_FCOE,
						      p_params->num_cons, 0);

			ecore_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
						      ECORE_CXT_FCOE_TID_SEG,
						      0, /* segment type */
						      p_params->num_tasks,
						      true);
		} else {
			DP_INFO(p_hwfn->p_dev,
				"FCoE personality used without setting params!\n");
		}
		break;
	}
	case ECORE_PCI_ISCSI:
	{
		struct ecore_iscsi_pf_params *p_params;

		p_params = &p_hwfn->pf_params.iscsi_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			ecore_cxt_set_proto_cid_count(p_hwfn,
						      PROTOCOLID_ISCSI,
						      p_params->num_cons, 0);

			ecore_cxt_set_proto_tid_count(p_hwfn,
						      PROTOCOLID_ISCSI,
						      ECORE_CXT_ISCSI_TID_SEG,
						      0, /* segment type */
						      p_params->num_tasks,
						      true);
		} else {
			DP_INFO(p_hwfn->p_dev,
				"iSCSI personality used without setting params!\n");
		}
		break;
	}
	default:
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
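/* Usage sketch (illustrative; the field values are made up): a storage
 * personality is expected to populate its per-protocol params before the
 * context manager sizes its resources:
 *
 *	p_hwfn->pf_params.iscsi_pf_params.num_cons = 128;
 *	p_hwfn->pf_params.iscsi_pf_params.num_tasks = 2048;
 *	ecore_cxt_set_pf_params(p_hwfn, 0);	// no RDMA tasks
 */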
enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
						struct ecore_tid_mem *p_info)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 proto, seg, total_lines, i, shadow_line;
	struct ecore_ilt_client_cfg *p_cli;
	struct ecore_ilt_cli_blk *p_fl_seg;
	struct ecore_tid_seg *p_seg_info;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = ECORE_CXT_FCOE_TID_SEG;
		break;
	case ECORE_PCI_ISCSI:
		proto = PROTOCOLID_ISCSI;
		seg = ECORE_CXT_ISCSI_TID_SEG;
		break;
	default:
		return ECORE_INVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return ECORE_INVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
	if (!p_seg_info->has_fl_mem)
		return ECORE_INVAL;

	p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
				   p_fl_seg->real_size_in_page);

	for (i = 0; i < total_lines; i++) {
		shadow_line = i + p_fl_seg->start_line -
			      p_hwfn->p_cxt_mngr->pf_start_line;
		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
	}
	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
			p_fl_seg->real_size_in_page;
	p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
	p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
				     p_info->tid_size;

	return ECORE_SUCCESS;
}
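/* Worked example (hypothetical sizes): with a 4096-byte ILT page, a
 * 128-byte TID and real_size_in_page == 3968 (31 TIDs per line),
 * waste == 4096 - 3968 == 128 bytes per page, and a total_size of
 * 12000 bytes needs DIV_ROUND_UP(12000, 3968) == 4 lines.
 */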
/* This function is very RoCE oriented; if another protocol ever wants this
 * feature, the function will need to be made more generic.
 */
enum _ecore_status_t
ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
			    enum ecore_cxt_elem_type elem_type,
			    u32 iid)
{
	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
	struct ecore_ilt_client_cfg *p_cli;
	struct ecore_ilt_cli_blk *p_blk;
	struct ecore_ptt *p_ptt;
	dma_addr_t p_phys;
	u64 ilt_hw_entry;
	void *p_virt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	switch (elem_type) {
	case ECORE_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case ECORE_ELEM_SRQ:
		/* The first ILT page is not used for regular SRQs. Skip it. */
		iid += ecore_cxt_srqs_per_page(p_hwfn);
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case ECORE_ELEM_XRC_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = XRC_SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case ECORE_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "ECORE_INVALID elem type = %d", elem_type);
		return ECORE_INVAL;
	}

	/* Calculate line in ilt */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	line = p_blk->start_line + (iid / elems_per_p);
	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;

	/* If the line is already allocated, do nothing; otherwise allocate it
	 * and write it to the PSWRQ2 registers.
	 * This section can run in parallel from different contexts, so mutex
	 * protection is needed.
	 */
#ifdef _NTDDK_
#pragma warning(suppress : 28121)
#endif
	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);

	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
		goto out0;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, false,
			  "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
		rc = ECORE_TIMEOUT;
		goto out0;
	}

	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					 &p_phys,
					 p_blk->real_size_in_page);
	if (!p_virt) {
		rc = ECORE_NOMEM;
		goto out1;
	}
	OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);

	/* Configuring refTagMask to 0xF is required for RoCE DIF MRs only,
	 * to compensate for a HW bug, but it is configured even if DIF is not
	 * enabled. This is harmless and allows us to avoid a dedicated API. We
	 * configure the field for all of the contexts on the newly allocated
	 * page.
	 */
	if (elem_type == ECORE_ELEM_TASK) {
		u32 elem_i;
		u8 *elem_start = (u8 *)p_virt;
		union type1_task_context *elem;

		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
			elem = (union type1_task_context *)elem_start;
			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
		}
	}

	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
		p_blk->real_size_in_page;

	/* compute absolute offset */
	reg_offset = PSWRQ2_REG_ILT_MEMORY +
		     (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);

	ilt_hw_entry = 0;
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(ilt_hw_entry,
		  ILT_ENTRY_PHY_ADDR,
		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));

	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
	ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
			    reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
			    OSAL_NULL /* default parameters */);

	if (elem_type == ECORE_ELEM_CXT) {
		u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
					 elems_per_p;

		/* Update the relevant register in the parser */
		ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
			 last_cid_allocated - 1);

		/* RoCE w/a -> we don't write to the prs search reg until the
		 * first cid is allocated. This is because the prs checks
		 * last_cid-1 >= 0, making 0 a valid value, which would cause
		 * a context load to occur on a RoCE packet received with
		 * cid == 0 even before the context was initialized. This can
		 * happen with a stray packet from the switch or a packet with
		 * a crc error.
		 */
		if (!p_hwfn->b_rdma_enabled_in_prs) {
			/* Enable RDMA search */
			ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
			p_hwfn->b_rdma_enabled_in_prs = true;
		}
	}

out1:
	ecore_ptt_release(p_hwfn, p_ptt);
out0:
	OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex);

	return rc;
}
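/* Encoding sketch (derived from the ILT_ENTRY_* masks above): the 64-bit
 * entry carries the 4KB-aligned physical address, pre-shifted by 12, in
 * bits [51:0], with the valid flag at bit 52. For example, for
 * p_phys == 0x1234567000:
 *
 *	u64 entry = ((0x1234567000ULL >> 12) & ILT_ENTRY_PHY_ADDR_MASK) |
 *		    (1ULL << ILT_ENTRY_VALID_SHIFT);	// 0x10000001234567
 */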
/* This function is very RoCE oriented; if another protocol ever wants this
 * feature, the function will need to be made more generic.
 */
enum _ecore_status_t
ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
			 enum ecore_cxt_elem_type elem_type,
			 u32 start_iid, u32 count)
{
	u32 start_line, end_line, shadow_start_line, shadow_end_line;
	u32 reg_offset, elem_size, hw_p_size, elems_per_p;
	struct ecore_ilt_client_cfg *p_cli;
	struct ecore_ilt_cli_blk *p_blk;
	u32 end_iid = start_iid + count;
	struct ecore_ptt *p_ptt;
	u64 ilt_hw_entry = 0;
	u32 i;

	switch (elem_type) {
	case ECORE_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case ECORE_ELEM_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case ECORE_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, false,
			  "ECORE_INVALID elem type = %d", elem_type);
		return ECORE_INVAL;
	}

	/* Calculate line in ilt */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	start_line = p_blk->start_line + (start_iid / elems_per_p);
	end_line = p_blk->start_line + (end_iid / elems_per_p);
	if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
		end_line--;

	shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
	shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, false,
			  "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
		return ECORE_TIMEOUT;
	}

	for (i = shadow_start_line; i < shadow_end_line; i++) {
		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
			continue;

		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
				       p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
				       p_hwfn->p_cxt_mngr->ilt_shadow[i].size);

		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;

		/* compute absolute offset */
		reg_offset = PSWRQ2_REG_ILT_MEMORY +
			     ((start_line++) * ILT_REG_SIZE_IN_BYTES *
			      ILT_ENTRY_IN_REGS);

		/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
		 * wide-bus.
		 */
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&ilt_hw_entry,
				    reg_offset,
				    sizeof(ilt_hw_entry) / sizeof(u32),
				    OSAL_NULL /* default parameters */);
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return ECORE_SUCCESS;
}
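/* Range example (hypothetical numbers): with elems_per_p == 8 and
 * start_iid == 0, count == 16 gives end_iid == 16 and an exclusive
 * end_line two pages past start_line, so exactly the two fully covered
 * pages (elements 0..15) are freed and invalidated; count == 12 instead
 * leaves the trailing, partially covered page mapped.
 */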
enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
					    u32 tid,
					    u8 ctx_type,
					    void **pp_task_ctx)
{
	struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct ecore_ilt_client_cfg *p_cli;
	struct ecore_tid_seg *p_seg_info;
	struct ecore_ilt_cli_blk *p_seg;
	u32 num_tids_per_block;
	u32 tid_size, ilt_idx;
	u32 total_lines;
	u32 proto, seg;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = ECORE_CXT_FCOE_TID_SEG;
		break;
	case ECORE_PCI_ISCSI:
		proto = PROTOCOLID_ISCSI;
		seg = ECORE_CXT_ISCSI_TID_SEG;
		break;
	default:
		return ECORE_INVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return ECORE_INVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];

	if (ctx_type == ECORE_CTX_WORKING_MEM) {
		p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
	} else if (ctx_type == ECORE_CTX_FL_MEM) {
		if (!p_seg_info->has_fl_mem)
			return ECORE_INVAL;
		p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	} else {
		return ECORE_INVAL;
	}

	total_lines = DIV_ROUND_UP(p_seg->total_size,
				   p_seg->real_size_in_page);
	tid_size = p_mngr->task_type_size[p_seg_info->type];
	num_tids_per_block = p_seg->real_size_in_page / tid_size;

	if (tid / num_tids_per_block >= total_lines)
		return ECORE_INVAL;

	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
		  p_mngr->pf_start_line;
	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
		       (tid % num_tids_per_block) * tid_size;

	return ECORE_SUCCESS;
}
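/* Lookup example (hypothetical sizes): with real_size_in_page == 4096 and
 * tid_size == 128, num_tids_per_block == 32; tid 100 then lives on relative
 * line 100 / 32 == 3, at byte offset (100 % 32) * 128 == 512 into that
 * line's shadow page.
 */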