/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ecore_gtt_reg_addr.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_ll2.h"
#include "ecore_fcoe.h"
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_roce.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "pcics_reg_driver.h"
/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
 * registers involved are not split and thus configuration is a race where
 * some of the PFs' configuration might be lost.
 * Eventually, this needs to move into an MFW-covered HW-lock as an
 * arbitration mechanism, as the current scheme doesn't cover some cases
 * [e.g., PDA or scenarios where there's more than a single compiled ecore
 * component in the system].
 */
static osal_spinlock_t qm_lock;
static bool qm_lock_init = false;

/* The minimal number of DPIs required to load the driver. The number was
 * set arbitrarily.
 */
#define ECORE_MIN_DPIS		(4)

#define ECORE_MIN_PWM_REGION	((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* used for doorbells */
};
static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->p_dev)) {
		/* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
		 * read from the actual register, but we're currently not
		 * using it for actual doorbelling.
		 */
		return 1 << 17;
	}

	val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);
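
	/* Illustrative arithmetic (not from the original sources): the
	 * register encodes the BAR size as a power of two, so val == 1
	 * yields 1 << 16 = 64kB and val == 4 yields 1 << 19 = 512kB.
	 */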

	/* The above registers were updated in the past only in CMT mode.
	 * Since they were found to be useful, MFW started updating them as of
	 * version 8.7.7.0. In older MFW versions they are set to 0, which
	 * means disabled.
	 */
	if (p_hwfn->p_dev->num_hwfns > 1) {
		DP_NOTICE(p_hwfn, false,
			  "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
	}

	DP_NOTICE(p_hwfn, false,
		  "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
	return 512 * 1024;
}
void ecore_init_dp(struct ecore_dev *p_dev,
		   u32 dp_module, u8 dp_level, void *dp_ctx)
{
	int i;

	p_dev->dp_level = dp_level;
	p_dev->dp_module = dp_module;
	p_dev->dp_ctx = dp_ctx;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
		p_hwfn->dp_ctx = dp_ctx;
	}
}
void ecore_init_struct(struct ecore_dev *p_dev)
{
	int i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->p_dev = p_dev;
		p_hwfn->b_active = false;

		OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
		OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	p_dev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 (may be overridden later) */
	p_dev->cache_shift = 7;
}
static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
	qm_info->qm_pq_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
	qm_info->qm_vport_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
	qm_info->qm_port_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
	qm_info->wfq_data = OSAL_NULL;
}
void ecore_resc_free(struct ecore_dev *p_dev)
{
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_free(&p_dev->hwfns[i]);
		return;
	}

	OSAL_FREE(p_dev, p_dev->fw_data);
	p_dev->fw_data = OSAL_NULL;

	OSAL_FREE(p_dev, p_dev->reset_stats);
	p_dev->reset_stats = OSAL_NULL;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_free(p_hwfn);
		ecore_qm_info_free(p_hwfn);
		ecore_spq_free(p_hwfn);
		ecore_eq_free(p_hwfn);
		ecore_consq_free(p_hwfn);
		ecore_int_free(p_hwfn);
#ifdef CONFIG_ECORE_LL2
		ecore_ll2_free(p_hwfn);
#endif
#ifdef CONFIG_ECORE_FCOE
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
			ecore_fcoe_free(p_hwfn);
#endif
#ifdef CONFIG_ECORE_ISCSI
		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			ecore_iscsi_free(p_hwfn);
			ecore_ooo_free(p_hwfn);
		}
#endif
		ecore_iov_free(p_hwfn);
		ecore_l2_free(p_hwfn);
		ecore_dmae_info_free(p_hwfn);
		ecore_dcbx_info_free(p_hwfn);
		/* @@@TBD Flush work-queue ? */
	}
}
/******************** QM initialization *******************/

/* bitmaps for indicating active traffic classes.
 * Special case for Arrowhead 4 port.
 */
/* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
#define ACTIVE_TCS_BMAP 0x9f
/* 0..3 actually used, OOO and high priority stuff all use 3 */
#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
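
/* Illustrative decomposition of the bitmaps above (not from the original
 * sources): ACTIVE_TCS_BMAP 0x9f == 0b10011111 -> bits 0-4 and 7 set, i.e.
 * TCs 0-3 for regular traffic, TC 4 for OOO and TC 7 for high priority.
 * ACTIVE_TCS_BMAP_4PORT_K2 0xf == 0b1111 -> only TCs 0-3 are active.
 */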

/* determines the physical queue flags for a given PF. */
static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
{
	u32 flags;

	/* common flags */
	flags = PQ_FLAGS_LB;

	/* feature flags */
	if (IS_ECORE_SRIOV(p_hwfn->p_dev))
		flags |= PQ_FLAGS_VFS;
	if (IS_ECORE_DCQCN(p_hwfn))
		flags |= PQ_FLAGS_RLS;

	/* protocol flags */
	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		flags |= PQ_FLAGS_MCOS;
		break;
	case ECORE_PCI_FCOE:
		flags |= PQ_FLAGS_OFLD;
		break;
	case ECORE_PCI_ISCSI:
		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
		break;
	case ECORE_PCI_ETH_ROCE:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
		break;
	case ECORE_PCI_ETH_IWARP:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
			 PQ_FLAGS_OFLD;
		break;
	default:
		DP_ERR(p_hwfn, "unknown personality %d\n",
		       p_hwfn->hw_info.personality);
		return 0;
	}

	return flags;
}

/* Getters for resource amounts necessary for qm initialization */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.num_hw_tc;
}

u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
{
	return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
	       p_hwfn->p_dev->p_iov_info->total_vfs : 0;
}

#define NUM_DEFAULT_RLS 1

u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
{
	u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);

	/* num RLs can't exceed resource amount of rls or vports or the
	 * dcqcn qps
	 */
	num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
				     (u16)OSAL_MIN_T(u32,
						     RESC_NUM(p_hwfn, ECORE_VPORT),
						     ROCE_DCQCN_RP_MAX_QPS));

	/* make sure after we reserve the default and VF rls we'll have
	 * something left
	 */
	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
		if (IS_ECORE_DCQCN(p_hwfn))
			DP_NOTICE(p_hwfn, false,
				  "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n",
				  num_pf_rls, num_vfs);
		return 0;
	}

	/* subtract rls necessary for VFs and one default one for the PF */
	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;

	return num_pf_rls;
}

u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
{
	u32 pq_flags = ecore_get_pq_flags(p_hwfn);

	/* all pqs share the same vport (hence the 1 below), except for vfs
	 * and pf_rl pqs
	 */
	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       ecore_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) *
	       ecore_init_qm_get_num_vfs(p_hwfn) + 1;
}

/* calc amount of PQs according to the requested flags */
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
{
	u32 pq_flags = ecore_get_pq_flags(p_hwfn);

	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       ecore_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
	       ecore_init_qm_get_num_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LB & pq_flags)) +
	       (!!(PQ_FLAGS_OOO & pq_flags)) +
	       (!!(PQ_FLAGS_ACK & pq_flags)) +
	       (!!(PQ_FLAGS_OFLD & pq_flags)) +
	       (!!(PQ_FLAGS_LLT & pq_flags)) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) *
	       ecore_init_qm_get_num_vfs(p_hwfn);
}
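
/* Worked example (illustrative, not from the original sources): a RoCE PF
 * (PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT plus the common PQ_FLAGS_LB)
 * with 8 TCs and SR-IOV disabled yields 8 MCOS PQs + 1 LB + 1 OFLD + 1 LLT =
 * 11 PQs, all sharing the PF's single vport.
 */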

/* initialize the top level QM params */
static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	bool four_port;

	/* pq and vport bases for this PF */
	qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
	qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);

	/* rate limiting and weighted fair queueing are always enabled */
	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;

	/* TC config is different for AH 4 port */
	four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;

	/* in AH 4 port we have fewer TCs per port */
	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
						     NUM_OF_PHYS_TCS;

	/* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port
	 * and 4 otherwise
	 */
	if (!qm_info->ooo_tc)
		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
					      DCBX_TCP_OOO_TC;
}

/* initialize qm vport params */
static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	u8 i;

	/* all vports participate in weighted fair queueing */
	for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;
}

/* initialize qm port params */
static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
{
	u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;

	/* indicate how ooo and high pri traffic is dealt with */
	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
			  ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;

	for (i = 0; i < num_ports; i++) {
		struct init_qm_port_params *p_qm_port =
			&p_hwfn->qm_info.qm_port_params[i];

		p_qm_port->active = 1;
		p_qm_port->active_phys_tcs = active_phys_tcs;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}
}

/* Reset the params which must be reset for qm init. QM init may be called as
 * a result of flows other than driver load (e.g. dcbx renegotiation). Other
 * params may be affected by the init but would simply recalculate to the
 * same values. The allocations made for QM init (ports, vports, pqs and
 * vfqs) are not affected as these amounts stay the same.
 */
static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_pqs = 0;
	qm_info->num_vports = 0;
	qm_info->num_pf_rls = 0;
	qm_info->num_vf_pqs = 0;
	qm_info->first_vf_pq = 0;
	qm_info->first_mcos_pq = 0;
	qm_info->first_rl_pq = 0;
}

static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_vports++;

	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports,
		       ecore_init_qm_get_num_vports(p_hwfn));
}

/* initialize a single pq and manage qm_info resources accounting.
 * The pq_init_flags param determines whether the PQ is rate limited
 * (for VF or PF) and whether a new vport is allocated to the pq or not
 * (i.e. vport will be shared).
 */

/* flags for pq init */
#define PQ_INIT_SHARE_VPORT	(1 << 0)
#define PQ_INIT_PF_RL		(1 << 1)
#define PQ_INIT_VF_RL		(1 << 2)

/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP	1
#define PQ_INIT_DEFAULT_TC		0
#define PQ_INIT_OFLD_TC			(p_hwfn->hw_info.offload_tc)

static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
			     struct ecore_qm_info *qm_info,
			     u8 tc, u32 pq_init_flags)
{
	u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn);

	if (pq_idx > max_pq)
		DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);

	/* init pq params */
	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
						 qm_info->num_vports;
	qm_info->qm_pq_params[pq_idx].tc_id = tc;
	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
	qm_info->qm_pq_params[pq_idx].rl_valid =
		(pq_init_flags & PQ_INIT_PF_RL ||
		 pq_init_flags & PQ_INIT_VF_RL);

	/* qm params accounting */
	qm_info->num_pqs++;
	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
		qm_info->num_vports++;

	if (pq_init_flags & PQ_INIT_PF_RL)
		qm_info->num_pf_rls++;

	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports,
		       ecore_init_qm_get_num_vports(p_hwfn));

	if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
		DP_ERR(p_hwfn,
		       "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
		       qm_info->num_pf_rls,
		       ecore_init_qm_get_num_pf_rls(p_hwfn));
}

/* get pq index according to PQ_FLAGS */
static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
					     u32 pq_flags)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	/* Can't have multiple flags set here */
	if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
			       sizeof(pq_flags)) > 1)
		goto err;

	switch (pq_flags) {
	case PQ_FLAGS_RLS:
		return &qm_info->first_rl_pq;
	case PQ_FLAGS_MCOS:
		return &qm_info->first_mcos_pq;
	case PQ_FLAGS_LB:
		return &qm_info->pure_lb_pq;
	case PQ_FLAGS_OOO:
		return &qm_info->ooo_pq;
	case PQ_FLAGS_ACK:
		return &qm_info->pure_ack_pq;
	case PQ_FLAGS_OFLD:
		return &qm_info->offload_pq;
	case PQ_FLAGS_LLT:
		return &qm_info->low_latency_pq;
	case PQ_FLAGS_VFS:
		return &qm_info->first_vf_pq;
	default:
		goto err;
	}

err:
	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
	return OSAL_NULL;
}

/* save pq index in qm info */
static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
				  u32 pq_flags, u16 pq_val)
{
	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
}

/* get tx pq index, with the PQ TX base already set (ready for context init) */
u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
{
	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	return *base_pq_idx + CM_TX_PQ_BASE;
}

u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
{
	u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);

	if (tc >= max_tc)
		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);

	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
}

u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
{
	u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);

	if (vf >= max_vf)
		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);

	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}

u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
{
	u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);

	if (rl >= max_rl)
		DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);

	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
}
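
/* Illustrative usage (hypothetical caller, not from the original sources):
 * a ramrod configuring an L2 TX queue on traffic class 2 would resolve its
 * firmware PQ index via ecore_get_cm_pq_idx_mcos(p_hwfn, 2), while a queue
 * belonging to VF 5 would use ecore_get_cm_pq_idx_vf(p_hwfn, 5).
 */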

/* Functions for creating specific types of pqs */
static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc_idx;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
	for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
		ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
	qm_info->num_vf_pqs = num_vfs;
	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
				 PQ_INIT_VF_RL);
}

static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
{
	u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
				 PQ_INIT_PF_RL);
}

static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
{
	/* rate limited pqs, must come first (FW assumption) */
	ecore_init_qm_rl_pqs(p_hwfn);

	/* pqs for multi cos */
	ecore_init_qm_mcos_pqs(p_hwfn);

	/* pure loopback pq */
	ecore_init_qm_lb_pq(p_hwfn);

	/* out of order pq */
	ecore_init_qm_ooo_pq(p_hwfn);

	/* pure ack pq */
	ecore_init_qm_pure_ack_pq(p_hwfn);

	/* pq for offloaded protocol */
	ecore_init_qm_offload_pq(p_hwfn);

	/* low latency pq */
	ecore_init_qm_low_latency_pq(p_hwfn);

	/* done sharing vports */
	ecore_init_qm_advance_vport(p_hwfn);

	/* vf pqs */
	ecore_init_qm_vf_pqs(p_hwfn);
}
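
/* Resulting PQ layout (derived from the call order above): the PF's PQ range
 * is populated as [PF RLs][MCOS per-TC][LB][OOO][ACK][OFLD][LLT][VF PQs],
 * where every PQ up to and including LLT shares the PF's first vport, and
 * each RL/VF PQ gets a vport of its own.
 */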

/* compare values of getters against resources amounts */
static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
{
	if (ecore_init_qm_get_num_vports(p_hwfn) >
	    RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
		return ECORE_INVAL;
	}

	if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
		DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/*
 * Function for verbose printing of the qm initialization results
 */
static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_vport_params *vport;
	struct init_qm_port_params *port;
	struct init_qm_pq_params *pq;
	int i, tc;

	/* top level params */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
		   qm_info->start_pq, qm_info->start_vport,
		   qm_info->pure_lb_pq, qm_info->offload_pq,
		   qm_info->pure_ack_pq);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
		   qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
		   qm_info->num_vf_pqs, qm_info->num_vports,
		   qm_info->max_phys_tcs_per_port);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
		   qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
		   qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
		   qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));

	/* port table */
	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
		port = &(qm_info->qm_port_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
			   i, port->active, port->active_phys_tcs,
			   port->num_pbf_cmd_lines, port->num_btb_blocks,
			   port->reserved);
	}

	/* vport table */
	for (i = 0; i < qm_info->num_vports; i++) {
		vport = &(qm_info->qm_vport_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
			   qm_info->start_vport + i, vport->vport_rl,
			   vport->vport_wfq);
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
				   vport->first_tx_pq_id[tc]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
	}

	/* pq table */
	for (i = 0; i < qm_info->num_pqs; i++) {
		pq = &(qm_info->qm_pq_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
			   qm_info->start_pq + i, pq->vport_id, pq->tc_id,
			   pq->wrr_group, pq->rl_valid);
	}
}

static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
{
	/* reset params required for init run */
	ecore_init_qm_reset_params(p_hwfn);

	/* init QM top level params */
	ecore_init_qm_params(p_hwfn);

	/* init QM port params */
	ecore_init_qm_port_params(p_hwfn);

	/* init QM vport params */
	ecore_init_qm_vport_params(p_hwfn);

	/* init QM physical queue params */
	ecore_init_qm_pq_params(p_hwfn);

	/* display all that init */
	ecore_dp_init_qm_params(p_hwfn);
}

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through the rbc interface to release the QM
 */
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	enum _ecore_status_t rc;

	/* initialize ecore's qm data structure */
	ecore_init_qm_info(p_hwfn);

	/* stop PF's qm queues */
	OSAL_SPIN_LOCK(&qm_lock);
	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				      qm_info->start_pq, qm_info->num_pqs);
	OSAL_SPIN_UNLOCK(&qm_lock);
	if (!b_rc)
		return ECORE_INVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	ecore_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	ecore_qm_init_pf(p_hwfn);

	/* activate init tool on runtime array */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			    p_hwfn->hw_info.hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* start PF's qm queues */
	OSAL_SPIN_LOCK(&qm_lock);
	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				      qm_info->start_pq, qm_info->num_pqs);
	OSAL_SPIN_UNLOCK(&qm_lock);
	if (!b_rc)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	enum _ecore_status_t rc;

	rc = ecore_init_qm_sanity(p_hwfn);
	if (rc != ECORE_SUCCESS)
		goto alloc_err;

	qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					    sizeof(struct init_qm_pq_params) *
					    ecore_init_qm_get_num_pqs(p_hwfn));
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					       sizeof(struct init_qm_vport_params) *
					       ecore_init_qm_get_num_vports(p_hwfn));
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					      sizeof(struct init_qm_port_params) *
					      p_hwfn->p_dev->num_ports_in_engines);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(struct ecore_wfq_data) *
					ecore_init_qm_get_num_vports(p_hwfn));
	if (!qm_info->wfq_data)
		goto alloc_err;

	return ECORE_SUCCESS;

alloc_err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
	ecore_qm_info_free(p_hwfn);
	return ECORE_NOMEM;
}

/******************** End QM initialization ***************/

enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 rdma_tasks, excess_tasks;
	u32 line_count;
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i) {
			rc = ecore_l2_alloc(&p_dev->hwfns[i]);
			if (rc != ECORE_SUCCESS)
				return rc;
		}
		return ECORE_SUCCESS;
	}

	p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
				     sizeof(*p_dev->fw_data));
	if (!p_dev->fw_data)
		return ECORE_NOMEM;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u32 n_eqes, num_cons;

		/* First allocate the context manager structure */
		rc = ecore_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager).
		 * Must be done prior to any further computations.
		 */
		rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
		if (rc)
			goto alloc_err;

		rc = ecore_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		/* init qm info */
		ecore_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
		if (rc) {
			DP_NOTICE(p_hwfn, false,
				  "too many ILT lines; re-computing with less lines\n");
			/* In case there are not enough ILT lines we reduce
			 * the number of RDMA tasks and re-compute.
			 */
			excess_tasks = ecore_cxt_cfg_ilt_compute_excess(
					p_hwfn, line_count);
			if (!excess_tasks)
				goto alloc_no_mem;

			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
			rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks);
			if (rc)
				goto alloc_err;

			rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
			if (rc) {
				DP_ERR(p_hwfn,
				       "failed ILT compute. Requested too many lines: %u\n",
				       line_count);
				goto alloc_err;
			}
		}

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = ecore_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = ecore_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
							   RESERVED_PTT_DPC);

		rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = ecore_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
			/* Calculate the EQ size
			 * ---------------------
			 * Each ICID may generate up to one event at a time i.e.
			 * the event must be handled/cleared before a new one
			 * can be generated. We calculate the sum of events per
			 * protocol and create an EQ deep enough to handle the
			 * worst case:
			 * - Core - according to SPQ.
			 * - RoCE - per QP there are a couple of ICIDs, one
			 *          responder and one requester, each can
			 *          generate an EQE => n_eqes_qp = 2 * n_qp.
			 *          Each CQ can generate an EQE. There are 2 CQs
			 *          per QP => n_eqes_cq = 2 * n_qp.
			 *          Hence the RoCE total is 4 * n_qp or
			 *          2 * num_cons.
			 * - ENet - There can be up to two events per VF. One
			 *          for VF-PF channel and another for VF FLR
			 *          initial cleanup. The number of VFs is
			 *          bounded by MAX_NUM_VFS_BB, and is much
			 *          smaller than RoCE's so we avoid exact
			 *          calculation.
			 */
			if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
				num_cons = ecore_cxt_get_proto_cid_count(
						p_hwfn, PROTOCOLID_ROCE,
						OSAL_NULL);
				num_cons *= 2;
			} else {
				num_cons = ecore_cxt_get_proto_cid_count(
						p_hwfn, PROTOCOLID_IWARP,
						OSAL_NULL);
			}
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
		} else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			num_cons = ecore_cxt_get_proto_cid_count(
					p_hwfn, PROTOCOLID_ISCSI, OSAL_NULL);
			n_eqes += 2 * num_cons;
		}
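
		/* Worked example (illustrative figures, not from the original
		 * sources): a RoCE PF with 8k QPs has num_cons = 2 * 8k = 16k
		 * ICIDs, doubled to 32k to cover the per-QP CQ pair, plus
		 * 2 * MAX_NUM_VFS_BB VF events on top of the SPQ capacity.
		 */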

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
		if (rc)
			goto alloc_err;

		rc = ecore_consq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = ecore_l2_alloc(p_hwfn);
		if (rc != ECORE_SUCCESS)
			goto alloc_err;

#ifdef CONFIG_ECORE_LL2
		if (p_hwfn->using_ll2) {
			rc = ecore_ll2_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif
#ifdef CONFIG_ECORE_FCOE
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
			rc = ecore_fcoe_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif
#ifdef CONFIG_ECORE_ISCSI
		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			rc = ecore_iscsi_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
			rc = ecore_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif

		/* DMA info initialization */
		rc = ecore_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}

		/* DCBX initialization */
		rc = ecore_dcbx_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to allocate memory for dcbx structure\n");
			goto alloc_err;
		}
	}

	p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					 sizeof(*p_dev->reset_stats));
	if (!p_dev->reset_stats) {
		DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
		goto alloc_no_mem;
	}

	return ECORE_SUCCESS;

alloc_no_mem:
	rc = ECORE_NOMEM;
alloc_err:
	ecore_resc_free(p_dev);
	return rc;
}

void ecore_resc_setup(struct ecore_dev *p_dev)
{
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_setup(&p_dev->hwfns[i]);
		return;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_setup(p_hwfn);
		ecore_spq_setup(p_hwfn);
		ecore_eq_setup(p_hwfn);
		ecore_consq_setup(p_hwfn);

		/* Read shadow of current MFW mailbox */
		ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
			    p_hwfn->mcp_info->mfw_mb_cur,
			    p_hwfn->mcp_info->mfw_mb_length);

		ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		ecore_l2_setup(p_hwfn);
		ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
#ifdef CONFIG_ECORE_LL2
		if (p_hwfn->using_ll2)
			ecore_ll2_setup(p_hwfn);
#endif
#ifdef CONFIG_ECORE_FCOE
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
			ecore_fcoe_setup(p_hwfn);
#endif
#ifdef CONFIG_ECORE_ISCSI
		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			ecore_iscsi_setup(p_hwfn);
			ecore_ooo_setup(p_hwfn);
		}
#endif
	}
}

#define FINAL_CLEANUP_POLL_CNT	(100)
#define FINAL_CLEANUP_POLL_TIME	(10)

enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	enum _ecore_status_t rc = ECORE_TIMEOUT;

	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
	    CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
		return ECORE_SUCCESS;
	}

	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
		   SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn, false,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = ECORE_SUCCESS;
	else
		DP_NOTICE(p_hwfn, true,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}

static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
{
	int hw_mode = 0;

	if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
		hw_mode |= 1 << MODE_BB;
	} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
		hw_mode |= 1 << MODE_K2;
	} else {
		DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
			  p_hwfn->p_dev->type);
		return ECORE_INVAL;
	}

	/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
	switch (p_hwfn->p_dev->num_ports_in_engines) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "num_ports_in_engine = %d not supported\n",
			  p_hwfn->p_dev->num_ports_in_engines);
		return ECORE_INVAL;
	}

	switch (p_hwfn->p_dev->mf_mode) {
	case ECORE_MF_DEFAULT:
	case ECORE_MF_NPAR:
		hw_mode |= 1 << MODE_MF_SI;
		break;
	case ECORE_MF_OVLAN:
		hw_mode |= 1 << MODE_MF_SD;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unsupported MF mode, init as DEFAULT\n");
		hw_mode |= 1 << MODE_MF_SI;
	}

	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
			hw_mode |= 1 << MODE_FPGA;
		} else {
			if (p_hwfn->p_dev->b_is_emul_full)
				hw_mode |= 1 << MODE_EMUL_FULL;
			else
				hw_mode |= 1 << MODE_EMUL_REDUCED;
		}
	} else {
		hw_mode |= 1 << MODE_ASIC;
	}

	if (p_hwfn->p_dev->num_hwfns > 1)
		hw_mode |= 1 << MODE_100G;

	p_hwfn->hw_info.hw_mode = hw_mode;

	DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
		   "Configuring function for hw_mode: 0x%08x\n",
		   p_hwfn->hw_info.hw_mode);

	return ECORE_SUCCESS;
}

/* MFW-replacement initializations for non-ASIC */
static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 pl_hv = 1;
	int i;

	if (CHIP_REV_IS_EMUL(p_dev)) {
		if (ECORE_IS_AH(p_dev))
			pl_hv |= 0x600;
		else if (ECORE_IS_E5(p_dev))
			ECORE_E5_MISSING_CODE;
	}

	ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);

	if (CHIP_REV_IS_EMUL(p_dev) &&
	    (ECORE_IS_AH(p_dev) || ECORE_IS_E5(p_dev)))
		ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
			 0x3ffffff);

	/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
	/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
	if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
		ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);

	if (CHIP_REV_IS_EMUL(p_dev)) {
		if (ECORE_IS_AH(p_dev)) {
			/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
			ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
				 (p_dev->num_ports_in_engines >> 1));

			ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
				 p_dev->num_ports_in_engines == 4 ? 0 : 3);
		} else if (ECORE_IS_E5(p_dev)) {
			ECORE_E5_MISSING_CODE;
		}
	}

	ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
	for (i = 0; i < 100; i++) {
		OSAL_UDELAY(50);
		if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
			break;
	}
	if (i == 100)
		DP_NOTICE(p_hwfn, true,
			  "RBC done failed to complete in PSWRQ2\n");

	return ECORE_SUCCESS;
}

/* Init run time data for all PFs and their VFs on an engine.
 * TBD - for VFs - once parent PF info for each VF is available in shmem,
 * as CAU requires knowledge of the parent PF for each VF.
 */
static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	u16 igu_sb_id;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_igu_info *p_igu_info;
		struct ecore_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (igu_sb_id = 0;
		     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
		     igu_sb_id++) {
			p_block = &p_igu_info->entry[igu_sb_id];

			if (!p_block->is_pf)
				continue;

			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
						p_block->function_id, 0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
					 sb_entry);
		}
	}
}

static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u32 val, wr_mbs, cache_line_size;

	val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
	switch (val) {
	case 0:
		wr_mbs = 128;
		break;
	case 1:
		wr_mbs = 256;
		break;
	case 2:
		wr_mbs = 512;
		break;
	default:
		DP_INFO(p_hwfn,
			"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
			val);
		return;
	}

	cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
	switch (cache_line_size) {
	case 32:
		val = 0;
		break;
	case 64:
		val = 1;
		break;
	case 128:
		val = 2;
		break;
	case 256:
		val = 3;
		break;
	default:
		DP_INFO(p_hwfn,
			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
			cache_line_size);
	}

	if (OSAL_CACHE_LINE_SIZE > wr_mbs)
		DP_INFO(p_hwfn,
			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
			OSAL_CACHE_LINE_SIZE, wr_mbs);

	STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
}

static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int hw_mode)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 vf_id, max_num_vfs;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_init_cau_rt_data(p_dev);

	/* Program GTT windows */
	ecore_gtt_init(p_hwfn);

	if (CHIP_REV_IS_EMUL(p_dev)) {
		rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	ecore_qm_common_rt_init(p_hwfn,
				p_dev->num_ports_in_engines,
				qm_info->max_phys_tcs_per_port,
				qm_info->pf_rl_en, qm_info->pf_wfq_en,
				qm_info->vport_rl_en, qm_info->vport_wfq_en,
				qm_info->qm_port_params);

	ecore_cxt_hw_init_common(p_hwfn);

	ecore_init_cache_line_size(p_hwfn, p_ptt);

	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* @@TBD MichalK - should add VALIDATE_VFID to init tool...
	 * need to decide with which value, maybe runtime
	 */
	ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (ECORE_IS_BB(p_dev)) {
		/* Workaround clears ROCE search for all functions to prevent
		 * involving a non-initialized function in processing ROCE
		 * packets.
		 */
		num_pfs = NUM_OF_ENG_PFS(p_dev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	/* Workaround for avoiding CCFC execution error when getting packets
	 * with CRC errors, and allowing instead the invoking of the FW error
	 * handler.
	 * This is not done inside the init tool since it currently can't
	 * perform a pretending to VFs.
	 */
	max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
		concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
		ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

	return rc;
}

#define MISC_REG_RESET_REG_2_XMAC_BIT		(1 << 4)
#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT	(1 << 5)

#define PMEG_IF_BYTE_COUNT	8

static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     u32 addr, u64 data, u8 reg_type, u8 port)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
		   ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
		   (8 << PMEG_IF_BYTE_COUNT),
		   (reg_type << 25) | (addr << 8) | port,
		   (u32)((data >> 32) & 0xffffffff),
		   (u32)(data & 0xffffffff));

	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
		 (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
		  0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
		 (reg_type << 25) | (addr << 8) | port);
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
		 data & 0xffffffff);
	ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
		 (data >> 32) & 0xffffffff);
}

#define XLPORT_MODE_REG			(0x20a)
#define XLPORT_MAC_CONTROL		(0x210)
#define XLPORT_FLOW_CONTROL_CONFIG	(0x207)
#define XLPORT_ENABLE_REG		(0x20b)

#define XLMAC_CTRL		(0x600)
#define XLMAC_MODE		(0x601)
#define XLMAC_RX_MAX_SIZE	(0x608)
#define XLMAC_TX_CTRL		(0x604)
#define XLMAC_PAUSE_CTRL	(0x60d)
#define XLMAC_PFC_CTRL		(0x60e)

static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u8 loopback = 0, port = p_hwfn->port_id * 2;

	DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);

	/* XLPORT MAC MODE - 0 Quad, 4 Single... */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4,
			 1, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
	/* XLMAC: SOFT RESET */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
	/* XLMAC: Port Speed >= 10Gbps */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
	/* XLMAC: Max Size */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
			 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
			 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
			 0x30ffffc000ULL, 0, port);
	/* XLMAC: TX_EN, RX_EN */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2),
			 0, port);
	/* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
			 0x1003 | (loopback << 2), 0, port);
	/* Enabled Parallel PFC interface */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
	/* XLPORT port enable */
	ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}

static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
{
	u8 port = p_hwfn->port_id;
	u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;

	DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);

	ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
		 (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
		 (port <<
		  CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
		 (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
		 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
		 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
		 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
		 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
		 (0xA <<
		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
		 (8 <<
		  ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));

	ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
		 0xa853);
}

static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev))
		ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
	else
		ecore_emul_link_init_bb(p_hwfn, p_ptt);
}

static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u8 port)
{
	int port_offset = port ? 0x800 : 0;
	u32 xmac_rxctrl = 0;

	/* Reset of XMAC */
	/* FIXME: move to common start */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT);	/* Clear */
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_BIT);	/* Set */

	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);

	/* Set the number of ports on the Warp Core to 10G */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);

	/* Soft reset of XMAC */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
	OSAL_MSLEEP(1);
	ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
		 MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);

	/* FIXME: move to common end */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);

	/* Set Max packet size: initialize XMAC block register for port 0 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);

	/* CRC append for Tx packets: init XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);

	/* Enable TX and RX: initialize XMAC block register for port 1 */
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
		 XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
	xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
			       XMAC_REG_RX_CTRL_BB + port_offset);
	xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
	ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
}

static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt,
		       u32 pwm_region_size,
		       u32 n_cpus)
{
	u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
	u32 dpi_bit_shift, dpi_count;
	u32 min_dpis;

	/* Calculate DPI size
	 * ------------------
	 * The PWM region contains Doorbell Pages. The first is reserved for
	 * the kernel, e.g. for L2. The others are free to be used by non-
	 * trusted applications, typically from user space. Each page, called
	 * a doorbell page, is sectioned into windows that allow doorbells to
	 * be issued in parallel by the kernel/application. The size of such a
	 * window (a.k.a. WID) is 1kB.
	 *
	 * 1kB WID x N WIDS = DPI page size
	 * DPI page size x N DPIs = PWM region size
	 *
	 * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE
	 * in order to ensure that two applications won't share the same page.
	 * It also must contain at least one WID per CPU to allow parallelism.
	 * It also must be a power of 2, since it is stored as a bit shift.
	 *
	 * The DPI page size is stored in a register as 'dpi_bit_shift' so that
	 * 0 is 4kB, 1 is 8kB, etc. Hence the minimum size is 4,096,
	 * containing 4 WIDs.
	 */
	dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
	dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
	dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
	dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
	dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
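
	/* Worked example (illustrative, not from the original sources): with
	 * a 1kB WID (per the comment above), 16 CPUs and 4kB OS pages,
	 * dpi_page_size_1 = 16kB, which is already a power of two and a
	 * multiple of the OS page size, so dpi_page_size = 16kB and
	 * dpi_bit_shift = log2(16384 / 4096) = 2.
	 */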

	dpi_count = pwm_region_size / dpi_page_size;

	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
	min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);

	/* Update hwfn */
	p_hwfn->dpi_size = dpi_page_size;
	p_hwfn->dpi_count = dpi_count;

	/* Update registers */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);

	if (dpi_count < min_dpis)
		return ECORE_NORESOURCES;

	return ECORE_SUCCESS;
}

enum ECORE_ROCE_EDPM_MODE {
	ECORE_ROCE_EDPM_MODE_ENABLE = 0,
	ECORE_ROCE_EDPM_MODE_FORCE_ON = 1,
	ECORE_ROCE_EDPM_MODE_DISABLE = 2,
};

static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 pwm_regsize, norm_regsize;
	u32 non_pwm_conn, min_addr_reg1;
	u32 db_bar_size, n_cpus = 1;
	u32 roce_edpm_mode;
	u32 pf_dems_shift;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u8 cond;

	db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
	if (p_hwfn->p_dev->num_hwfns > 1)
		db_bar_size /= 2;

	/* Calculate doorbell regions
	 * -----------------------------------
	 * The doorbell BAR is made of two regions. The first is called normal
	 * region and the second is called PWM region. In the normal region
	 * each ICID has its own set of addresses so that writing to that
	 * specific address identifies the ICID. In the Process Window Mode
	 * region the ICID is given in the data written to the doorbell. The
	 * DORQ_REG_PF_MIN_ADDR_REG1 register (set below) denotes the offset
	 * in the doorbell BAR at which the PWM region begins.
	 * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
	 * non-PWM connection. The calculation below computes the total non-PWM
	 * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
	 * in units of 4,096 bytes.
	 */
	non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
		       ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
						     OSAL_NULL) +
		       ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						     OSAL_NULL);
	norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
	min_addr_reg1 = norm_regsize / 4096;
	pwm_regsize = db_bar_size - norm_regsize;
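
	/* Worked example (illustrative figures, not from the original
	 * sources): with 112 non-PWM connections and ECORE_PF_DEMS_SIZE = 4,
	 * norm_regsize = ROUNDUP(448, 4096) = 4096, so min_addr_reg1 = 1 and
	 * a 512kB half-BAR leaves pwm_regsize = 524288 - 4096 = 520192 bytes
	 * for PWM doorbells.
	 */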

	/* Check that the normal and PWM sizes are valid */
	if (db_bar_size < norm_regsize) {
		DP_ERR(p_hwfn->p_dev,
		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x)\n",
		       db_bar_size, norm_regsize);
		return ECORE_NORESOURCES;
	}
	if (pwm_regsize < ECORE_MIN_PWM_REGION) {
		DP_ERR(p_hwfn->p_dev,
		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
		       pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
		       norm_regsize);
		return ECORE_NORESOURCES;
	}

	/* Calculate number of DPIs */
	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
	if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
	    (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON)) {
		/* Either EDPM is mandatory, or we are attempting to allocate
		 * a WID per CPU.
		 */
		n_cpus = OSAL_NUM_ACTIVE_CPU();
		rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
	}

	cond = ((rc != ECORE_SUCCESS) &&
		(roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
	       (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
	if (cond || p_hwfn->dcbx_no_edpm) {
		/* Either EDPM is disabled from user configuration, or it is
		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocate a WID per CPU.
		 */
		n_cpus = 1;
		rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);

#ifdef CONFIG_ECORE_ROCE
		/* If we entered this flow due to DCBX then the DPM register
		 * is already configured.
		 */
		if (!p_hwfn->dcbx_no_edpm)
			ecore_rdma_dpm_bar(p_hwfn, p_ptt);
#endif
	}

	p_hwfn->wid_count = (u16)n_cpus;

	DP_INFO(p_hwfn,
		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
		norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count,
		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
		"disabled" : "enabled");

	/* Check return codes from above calls */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d. You can try reducing this down to %d via user configuration n_dpi or by disabling EDPM via user configuration roce_edpm\n",
		       p_hwfn->dpi_count,
		       p_hwfn->pf_params.rdma_pf_params.min_dpis,
		       ECORE_MIN_DPIS);
		return ECORE_NORESOURCES;
	}

	/* Update hwfn; this is later used to calculate the doorbell address */
	p_hwfn->dpi_start_offset = norm_regsize;

	/* Update registers */
	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
	pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       int hw_mode)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
			    hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* FW 8.10.5.0 requires us to configure PF_VECTOR and DUALMODE in LLH.
	 * This would hopefully be moved to MFW.
	 */
	if (IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) {
		u8 pf_id = 0;

		if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) ==
		    ECORE_SUCCESS) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
				   "PF[%08x] is first eth on engine\n",
				   pf_id);

			/* We should have configured BIT for ppfid, i.e., the
			 * relative function number in the port. But there's a
			 * bug in LLH in BB where the ppfid is actually engine
			 * based, so we need to take this into account.
			 */
			if (!ECORE_IS_BB(p_hwfn->p_dev))
				pf_id /= p_hwfn->p_dev->num_ports_in_engines;

			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
		}

		/* Take the protocol-based hit vector if there is a hit,
		 * otherwise take the other vector.
		 */
		ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
	}

	if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		if (ECORE_IS_AH(p_hwfn->p_dev))
			return ECORE_SUCCESS;
		else if (ECORE_IS_BB(p_hwfn->p_dev))
			ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
		else /* E5 */
			ECORE_E5_MISSING_CODE;
	} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (p_hwfn->p_dev->num_hwfns > 1) {
			/* Activate OPTE in CMT */
			u32 val;

			val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
			val |= 0x10;
			ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
			ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
			ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
			ecore_wr(p_hwfn, p_ptt,
				 NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
				 0x55555555);
		}

		ecore_emul_link_init(p_hwfn, p_ptt);
	} else {
		DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
	}

	return rc;
}
1879 static enum _ecore_status_t ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
1880 struct ecore_ptt *p_ptt,
1881 struct ecore_tunnel_info *p_tunn,
1884 enum ecore_int_mode int_mode,
1885 bool allow_npar_tx_switch)
1887 u8 rel_pf_id = p_hwfn->rel_pf_id;
1889 enum _ecore_status_t rc = ECORE_SUCCESS;
1893 if (p_hwfn->mcp_info) {
1894 struct ecore_mcp_function_info *p_info;
1896 p_info = &p_hwfn->mcp_info->func_info;
1897 if (p_info->bandwidth_min)
1898 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
1900 /* Update rate limit once we'll actually have a link */
1901 p_hwfn->qm_info.pf_rl = 100000;
1903 ecore_cxt_hw_init_pf(p_hwfn);
1905 ecore_int_igu_init_rt(p_hwfn);
1907 /* Set VLAN in NIG if needed */
1908 if (hw_mode & (1 << MODE_MF_SD)) {
1909 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
1910 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
1911 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
1912 p_hwfn->hw_info.ovlan);
1915 /* Enable classification by MAC if needed */
1916 if (hw_mode & (1 << MODE_MF_SI)) {
1917 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n");
1918 STORE_RT_REG(p_hwfn,
1919 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
1922 /* Protocl Configuration - @@@TBD - should we set 0 otherwise?*/
1923 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
1924 (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
1925 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
1926 (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
1927 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
1929 /* perform debug configuration when chip is out of reset */
1930 OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
1932 /* Cleanup chip from previous driver if such remains exist */
1933 rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
1934 if (rc != ECORE_SUCCESS) {
1935 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
1939 /* PF Init sequence */
1940 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
1944 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
1945 rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
1949 /* Pure runtime initializations - directly to the HW */
1950 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
1952 /* PCI relaxed ordering causes a decrease in the performance on some
1953 * systems. Till a root cause is found, disable this attribute in the
1956 pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
1958 DP_NOTICE(p_hwfn, true,
1959 "Failed to find the PCI Express Capability structure in the PCI config space\n");
1962 OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
1963 ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
1964 OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);
1966 rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
1970 /* FW 8.10.5.0 requires us to configure MSG_INFO in PRS.
1971 * This would hopefully be moved to MFW.
1973 if (IS_MF_SI(p_hwfn)) {
1977 if (ecore_hw_init_first_eth(p_hwfn, p_ptt, &pf_id) ==
1979 if (p_hwfn->rel_pf_id == pf_id) {
1980 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
1981 "PF[%d] is first ETH on engine\n",
1985 ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
1990 /* enable interrupts */
1991 rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
1992 if (rc != ECORE_SUCCESS)
1995 /* send function start command */
1996 rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
1997 allow_npar_tx_switch);
1999 DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n");
2001 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
2002 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2003 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
2005 if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
2007 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
2009 ecore_wr(p_hwfn, p_ptt,
2010 PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
2013 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2014 "PRS_REG_SEARCH registers after start PFn\n");
2015 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
2016 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2017 "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
2018 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
2019 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2020 "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
2021 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
2022 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2023 "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
2024 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
2025 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2026 "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
2027 prs_reg = ecore_rd(p_hwfn, p_ptt,
2028 PRS_REG_SEARCH_TCP_FIRST_FRAG);
2029 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2030 "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
2032 prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
2033 DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
2034 "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
2040 enum _ecore_status_t ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
2041 struct ecore_ptt *p_ptt,
2044 u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
2046 /* Change PF in PXP */
2047 ecore_wr(p_hwfn, p_ptt,
2048 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
2050 /* wait until value is set - try for 1 second every 50us */
2051 for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
2052 val = ecore_rd(p_hwfn, p_ptt,
2053 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
2060 if (val != set_val) {
2061 DP_NOTICE(p_hwfn, true,
2062 "PFID_ENABLE_MASTER wasn't changed after a second\n");
2063 return ECORE_UNKNOWN_ERROR;
2066 return ECORE_SUCCESS;
2069 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
2070 struct ecore_ptt *p_main_ptt)
2072 /* Read shadow of current MFW mailbox */
2073 ecore_mcp_read_mb(p_hwfn, p_main_ptt);
2074 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
2075 p_hwfn->mcp_info->mfw_mb_cur,
2076 p_hwfn->mcp_info->mfw_mb_length);
2079 static enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
2080 struct ecore_hw_init_params *p_params)
2082 if (p_params->p_tunn) {
2083 ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
2084 ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
2087 p_hwfn->b_int_enabled = 1;
2089 return ECORE_SUCCESS;
2093 ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req,
2094 struct ecore_drv_load_params *p_drv_load)
2096 OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
2098 if (p_drv_load != OSAL_NULL) {
2099 p_load_req->drv_role = p_drv_load->is_crash_kernel ?
2100 ECORE_DRV_ROLE_KDUMP :
2102 p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
2103 p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
2104 p_load_req->override_force_load =
2105 p_drv_load->override_force_load;
2107 p_load_req->drv_role = ECORE_DRV_ROLE_OS;
2108 p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
2109 p_load_req->avoid_eng_reset = false;
2110 p_load_req->override_force_load =
2111 ECORE_OVERRIDE_FORCE_LOAD_NONE;
2115 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
2116 struct ecore_hw_init_params *p_params)
2118 struct ecore_load_req_params load_req_params;
2119 u32 load_code, param, drv_mb_param;
2120 bool b_default_mtu = true;
2121 struct ecore_hwfn *p_hwfn;
2122 enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
2125 if ((p_params->int_mode == ECORE_INT_MODE_MSI) && (p_dev->num_hwfns > 1)) {
2126 DP_NOTICE(p_dev, false,
2127 "MSI mode is not supported for CMT devices\n");
2132 rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data);
2133 if (rc != ECORE_SUCCESS)
2137 for_each_hwfn(p_dev, i) {
2138 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2140 /* If management didn't provide a default, set one of our own */
2141 if (!p_hwfn->hw_info.mtu) {
2142 p_hwfn->hw_info.mtu = 1500;
2143 b_default_mtu = false;
2147 ecore_vf_start(p_hwfn, p_params);
2151 /* Enable DMAE in PXP */
2152 rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
2153 if (rc != ECORE_SUCCESS)
2156 rc = ecore_calc_hw_mode(p_hwfn);
2157 if (rc != ECORE_SUCCESS)
2160 ecore_fill_load_req_params(&load_req_params,
2161 p_params->p_drv_load_params);
2162 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
2164 if (rc != ECORE_SUCCESS) {
2165 DP_NOTICE(p_hwfn, true,
2166 "Failed sending a LOAD_REQ command\n");
2170 load_code = load_req_params.load_code;
2171 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2172 "Load request was sent. Load code: 0x%x\n",
2175 ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
2178 * When coming back from hibernate state, the registers from
2179 * which shadow is read initially are not initialized. It turns
2180 * out that these registers get initialized during the
2181 * ecore_mcp_load_req request, so we need to reread them here
2182 * to get the proper shadow register value.
2183 * Note: This is a workaround for the missing MFW
2184 * initialization. It may be removed once the implementation
2187 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
2189 /* Only relevant for recovery:
2190 * Clear the indication after the LOAD_REQ command is responded
2193 p_dev->recov_in_prog = false;
2195 p_hwfn->first_on_engine = (load_code ==
2196 FW_MSG_CODE_DRV_LOAD_ENGINE);
2198 if (!qm_lock_init) {
2199 OSAL_SPIN_LOCK_INIT(&qm_lock);
2200 qm_lock_init = true;
2203 switch (load_code) {
2204 case FW_MSG_CODE_DRV_LOAD_ENGINE:
2205 rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
2206 p_hwfn->hw_info.hw_mode);
2207 if (rc != ECORE_SUCCESS)
2210 case FW_MSG_CODE_DRV_LOAD_PORT:
2211 rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
2212 p_hwfn->hw_info.hw_mode);
2213 if (rc != ECORE_SUCCESS)
2216 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
2217 rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
2219 p_hwfn->hw_info.hw_mode,
2220 p_params->b_hw_start,
2222 p_params->allow_npar_tx_switch);
2225 DP_NOTICE(p_hwfn, false,
2226 "Unexpected load code [0x%08x]", load_code);
2231 if (rc != ECORE_SUCCESS)
2232 DP_NOTICE(p_hwfn, true,
2233 "init phase failed for loadcode 0x%x (rc %d)\n",
2236 /* ACK mfw regardless of success or failure of initialization */
2237 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
2238 DRV_MSG_CODE_LOAD_DONE,
2239 0, &load_code, ¶m);
2241 /* Check the return value of the ecore_hw_init_*() function */
2242 if (rc != ECORE_SUCCESS)
2245 /* Check the return value of the LOAD_DONE command */
2246 if (mfw_rc != ECORE_SUCCESS) {
2247 DP_NOTICE(p_hwfn, true,
2248 "Failed sending a LOAD_DONE command\n");
2252 /* Check if there is a DID mismatch between nvm-cfg/efuse */
2253 if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
2254 DP_NOTICE(p_hwfn, false,
2255 "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
2257 /* send DCBX attention request command */
2258 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
2259 "sending phony dcbx set command to trigger DCBx attention handling\n");
2260 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
2261 DRV_MSG_CODE_SET_DCBX,
2262 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
2263 &load_code, ¶m);
2264 if (mfw_rc != ECORE_SUCCESS) {
2265 DP_NOTICE(p_hwfn, true,
2266 "Failed to send DCBX attention request\n");
2270 p_hwfn->hw_init_done = true;
2274 p_hwfn = ECORE_LEADING_HWFN(p_dev);
2275 drv_mb_param = STORM_FW_VERSION;
2276 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
2277 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
2278 drv_mb_param, &load_code, ¶m);
2279 if (rc != ECORE_SUCCESS)
2280 DP_INFO(p_hwfn, "Failed to update firmware version\n");
2282 if (!b_default_mtu) {
2283 rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
2284 p_hwfn->hw_info.mtu);
2285 if (rc != ECORE_SUCCESS)
2286 DP_INFO(p_hwfn, "Failed to update default mtu\n");
2289 rc = ecore_mcp_ov_update_driver_state(p_hwfn,
2291 ECORE_OV_DRIVER_STATE_DISABLED);
2292 if (rc != ECORE_SUCCESS)
2293 DP_INFO(p_hwfn, "Failed to update driver state\n");
2295 rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
2296 ECORE_OV_ESWITCH_VEB);
2297 if (rc != ECORE_SUCCESS)
2298 DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
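#if 0
/* Illustrative only - a minimal sketch of how an upper-layer driver
 * might invoke ecore_hw_init(). ECORE_INT_MODE_MSIX is assumed to be a
 * valid member of enum ecore_int_mode (only MSI is named above); the
 * other fields are exactly the ones ecore_hw_init() dereferences.
 */
static enum _ecore_status_t example_start_device(struct ecore_dev *p_dev)
{
	struct ecore_hw_init_params params;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.p_tunn = OSAL_NULL;		/* no tunnel configuration */
	params.b_hw_start = true;		/* run the full start flow */
	params.int_mode = ECORE_INT_MODE_MSIX;	/* assumed enum member */
	params.allow_npar_tx_switch = true;
	params.bin_fw_data = OSAL_NULL;		/* FW data assumed pre-loaded */
	params.p_drv_load_params = OSAL_NULL;	/* take the ecore defaults */

	return ecore_hw_init(p_dev, &params);
}
#endif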
2304 #define ECORE_HW_STOP_RETRY_LIMIT (10)
2305 static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
2306 struct ecore_hwfn *p_hwfn,
2307 struct ecore_ptt *p_ptt)
2312 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
2313 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
2315 i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
2317 if ((!ecore_rd(p_hwfn, p_ptt,
2318 TM_REG_PF_SCAN_ACTIVE_CONN)) &&
2319 (!ecore_rd(p_hwfn, p_ptt,
2320 TM_REG_PF_SCAN_ACTIVE_TASK)))
2323 /* Depending on the number of connections/tasks, a 1ms sleep
2324 * may be required between polls
2329 if (i < ECORE_HW_STOP_RETRY_LIMIT)
2332 DP_NOTICE(p_hwfn, true,
2333 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
2334 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
2335 (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
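#if 0
/* Illustrative only - the poll-until-idle pattern used by
 * ecore_hw_timers_stop() above, factored into a helper. OSAL_MSLEEP()
 * is assumed to exist in the OSAL layer; the retry budget mirrors
 * ECORE_HW_STOP_RETRY_LIMIT.
 */
static bool example_timers_scan_idle(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	int i;

	for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT; i++) {
		if (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN) &&
		    !ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))
			return true;	/* both linear scans completed */
		OSAL_MSLEEP(1);		/* let the scanners drain */
	}

	return false;			/* scans still active - timed out */
}
#endif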
2338 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
2342 for_each_hwfn(p_dev, j) {
2343 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
2344 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
2346 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
2350 static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
2351 struct ecore_ptt *p_ptt,
2352 u32 addr, u32 expected_val)
2354 u32 val = ecore_rd(p_hwfn, p_ptt, addr);
2356 if (val != expected_val) {
2357 DP_NOTICE(p_hwfn, true,
2358 "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
2359 addr, val, expected_val);
2360 return ECORE_UNKNOWN_ERROR;
2363 return ECORE_SUCCESS;
2366 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
2368 struct ecore_hwfn *p_hwfn;
2369 struct ecore_ptt *p_ptt;
2370 enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
2373 for_each_hwfn(p_dev, j) {
2374 p_hwfn = &p_dev->hwfns[j];
2375 p_ptt = p_hwfn->p_main_ptt;
2377 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
2380 ecore_vf_pf_int_cleanup(p_hwfn);
2381 rc = ecore_vf_pf_reset(p_hwfn);
2382 if (rc != ECORE_SUCCESS) {
2383 DP_NOTICE(p_hwfn, true,
2384 "ecore_vf_pf_reset failed. rc = %d.\n",
2386 rc2 = ECORE_UNKNOWN_ERROR;
2391 /* mark the hw as uninitialized... */
2392 p_hwfn->hw_init_done = false;
2394 /* Send unload command to MCP */
2395 if (!p_dev->recov_in_prog) {
2396 rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
2397 if (rc != ECORE_SUCCESS) {
2398 DP_NOTICE(p_hwfn, true,
2399 "Failed sending a UNLOAD_REQ command. rc = %d.\n",
2401 rc2 = ECORE_UNKNOWN_ERROR;
2405 OSAL_DPC_SYNC(p_hwfn);
2407 /* After this point no MFW attentions are expected, e.g. prevent
2408 * race between pf stop and dcbx pf update.
2411 rc = ecore_sp_pf_stop(p_hwfn);
2412 if (rc != ECORE_SUCCESS) {
2413 DP_NOTICE(p_hwfn, true,
2414 "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
2416 rc2 = ECORE_UNKNOWN_ERROR;
2419 /* perform debug action after PF stop was sent */
2420 OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
2422 /* close NIG to BRB gate */
2423 ecore_wr(p_hwfn, p_ptt,
2424 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
2427 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
2428 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
2429 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
2430 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
2431 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
2433 /* @@@TBD - clean transmission queues (5.b) */
2434 /* @@@TBD - clean BTB (5.c) */
2436 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
2438 /* @@@TBD - verify DMAE requests are done (8) */
2440 /* Disable Attention Generation */
2441 ecore_int_igu_disable_int(p_hwfn, p_ptt);
2442 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
2443 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
2444 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
2445 rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
2446 if (rc != ECORE_SUCCESS) {
2447 DP_NOTICE(p_hwfn, true,
2448 "Failed to return IGU CAM to default\n");
2449 rc2 = ECORE_UNKNOWN_ERROR;
2452 /* Need to wait 1ms to guarantee SBs are cleared */
2455 if (!p_dev->recov_in_prog) {
2456 ecore_verify_reg_val(p_hwfn, p_ptt,
2457 QM_REG_USG_CNT_PF_TX, 0);
2458 ecore_verify_reg_val(p_hwfn, p_ptt,
2459 QM_REG_USG_CNT_PF_OTHER, 0);
2460 /* @@@TBD - assert on incorrect xCFC values (10.b) */
2463 /* Disable PF in HW blocks */
2464 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
2465 ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
2467 if (!p_dev->recov_in_prog) {
2468 ecore_mcp_unload_done(p_hwfn, p_ptt);
2469 if (rc != ECORE_SUCCESS) {
2470 DP_NOTICE(p_hwfn, true,
2471 "Failed sending a UNLOAD_DONE command. rc = %d.\n",
2473 rc2 = ECORE_UNKNOWN_ERROR;
2479 p_hwfn = ECORE_LEADING_HWFN(p_dev);
2480 p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
2482 /* Disable DMAE in PXP - in CMT, this should only be done for
2483 * first hw-function, and only after all transactions have
2484 * stopped for all active hw-functions.
2486 rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
2487 if (rc != ECORE_SUCCESS) {
2488 DP_NOTICE(p_hwfn, true,
2489 "ecore_change_pci_hwfn failed. rc = %d.\n",
2491 rc2 = ECORE_UNKNOWN_ERROR;
2498 void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
2502 for_each_hwfn(p_dev, j) {
2503 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
2504 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
2507 ecore_vf_pf_int_cleanup(p_hwfn);
2511 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Shutting down the fastpath\n");
2513 ecore_wr(p_hwfn, p_ptt,
2514 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
2516 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
2517 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
2518 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
2519 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
2520 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
2522 /* @@@TBD - clean transmission queues (5.b) */
2523 /* @@@TBD - clean BTB (5.c) */
2525 /* @@@TBD - verify DMAE requests are done (8) */
2527 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
2528 /* Need to wait 1ms to guarantee SBs are cleared */
2533 void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
2535 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
2537 if (IS_VF(p_hwfn->p_dev))
2540 /* If RoCE info is allocated it means RoCE is initialized and should
2541 * be enabled in the searcher.
2543 if (p_hwfn->p_rdma_info) {
2544 if (p_hwfn->b_rdma_enabled_in_prs)
2545 ecore_wr(p_hwfn, p_ptt,
2546 p_hwfn->rdma_prs_search_reg, 0x1);
2547 ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1);
2550 /* Re-open incoming traffic */
2551 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2552 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
2554 /* TEMP macro to be removed when the WOL code is revisited */
2555 #define ECORE_WOL_WR(_p_hwfn, _p_ptt, _offset, _val) (ECORE_IS_BB((_p_hwfn)->p_dev) ? \
2556 	ecore_wr(_p_hwfn, _p_ptt, _offset, _val) : \
2557 	ecore_mcp_wol_wr(_p_hwfn, _p_ptt, _offset, _val))
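/* On BB the ACPI/WOL pattern registers live in the NIG block and can be
 * written directly, while on K2/E5 the WOL_REG_* addresses are reached
 * through the MFW via ecore_mcp_wol_wr() - hence the dispatch on
 * ECORE_IS_BB(). (Our reading of the code below; not MFW-documented.)
 */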
2559 enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
2560 const bool b_enable,
2565 struct ecore_hwfn *hwfn = &p_dev->hwfns[0];
2569 /* Get length and CRC register offsets */
2573 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_LEN_BB :
2574 WOL_REG_ACPI_PAT_0_LEN_K2_E5;
2575 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_0_CRC_BB :
2576 WOL_REG_ACPI_PAT_0_CRC_K2_E5;
2579 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_LEN_BB :
2580 WOL_REG_ACPI_PAT_1_LEN_K2_E5;
2581 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_1_CRC_BB :
2582 WOL_REG_ACPI_PAT_1_CRC_K2_E5;
2585 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_LEN_BB :
2586 WOL_REG_ACPI_PAT_2_LEN_K2_E5;
2587 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_2_CRC_BB :
2588 WOL_REG_ACPI_PAT_2_CRC_K2_E5;
2591 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_LEN_BB :
2592 WOL_REG_ACPI_PAT_3_LEN_K2_E5;
2593 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_3_CRC_BB :
2594 WOL_REG_ACPI_PAT_3_CRC_K2_E5;
2597 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_LEN_BB :
2598 WOL_REG_ACPI_PAT_4_LEN_K2_E5;
2599 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_4_CRC_BB :
2600 WOL_REG_ACPI_PAT_4_CRC_K2_E5;
2603 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_LEN_BB :
2604 WOL_REG_ACPI_PAT_5_LEN_K2_E5;
2605 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_5_CRC_BB :
2606 WOL_REG_ACPI_PAT_5_CRC_K2_E5;
2609 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_LEN_BB :
2610 WOL_REG_ACPI_PAT_6_LEN_K2_E5;
2611 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_6_CRC_BB :
2612 WOL_REG_ACPI_PAT_6_CRC_K2_E5;
2615 reg_len = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_LEN_BB :
2616 WOL_REG_ACPI_PAT_7_LEN_K2_E5;
2617 reg_crc = ECORE_IS_BB(p_dev) ? NIG_REG_ACPI_PAT_7_CRC_BB :
2618 WOL_REG_ACPI_PAT_7_CRC_K2_E5;
2621 return ECORE_UNKNOWN_ERROR;
2624 /* Align pattern size to 4 */
2625 while (pattern_size % 4)
2629 /* write pattern length */
2630 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_len, pattern_size);
2632 /* write CRC value */
2633 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, reg_crc, crc);
2636 "ecore_set_nwuf_reg: idx[%d] reg_crc[0x%x=0x%08x] "
2637 "reg_len[0x%x=0x%x]\n",
2638 reg_idx, reg_crc, crc, reg_len, pattern_size);
2640 return ECORE_SUCCESS;
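#if 0
/* Illustrative only - the increment loop in ecore_set_nwuf_reg() rounds
 * pattern_size up to a multiple of 4; the same result in closed form:
 */
static u32 example_align_to_4(u32 pattern_size)
{
	return (pattern_size + 3U) & ~3U;	/* e.g. 13 -> 16, 16 -> 16 */
}
#endif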
2643 void ecore_wol_buffer_clear(struct ecore_dev *p_dev)
2645 struct ecore_hwfn *hwfn = &p_dev->hwfns[0];
2646 const u32 wake_buffer_clear_offset =
2647 ECORE_IS_BB(p_dev) ?
2648 NIG_REG_WAKE_BUFFER_CLEAR_BB : WOL_REG_WAKE_BUFFER_CLEAR_K2_E5;
2651 "ecore_wol_buffer_clear: reset "
2652 "REG_WAKE_BUFFER_CLEAR offset=0x%08x\n",
2653 wake_buffer_clear_offset);
2655 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 1);
2656 ECORE_WOL_WR(hwfn, hwfn->p_main_ptt, wake_buffer_clear_offset, 0);
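/* Note: the 1-then-0 sequence above pulses the wake-buffer clear bit,
 * presumably re-arming the buffer for the next wake packet. This is an
 * interpretation of the flow, not a documented requirement.
 */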
2659 enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev,
2660 struct ecore_wake_info *wake_info)
2662 struct ecore_hwfn *hwfn = &p_dev->hwfns[0];
2663 u32 *buf = OSAL_NULL;
2665 const u32 reg_wake_buffer_offset =
2666 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_BUFFER_BB :
2667 WOL_REG_WAKE_BUFFER_K2_E5;
2669 wake_info->wk_info = ecore_rd(hwfn, hwfn->p_main_ptt,
2670 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_INFO_BB :
2671 WOL_REG_WAKE_INFO_K2_E5);
2672 wake_info->wk_details = ecore_rd(hwfn, hwfn->p_main_ptt,
2673 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_DETAILS_BB :
2674 WOL_REG_WAKE_DETAILS_K2_E5);
2675 wake_info->wk_pkt_len = ecore_rd(hwfn, hwfn->p_main_ptt,
2676 ECORE_IS_BB(p_dev) ? NIG_REG_WAKE_PKT_LEN_BB :
2677 WOL_REG_WAKE_PKT_LEN_K2_E5);
2680 "ecore_get_wake_info: REG_WAKE_INFO=0x%08x "
2681 "REG_WAKE_DETAILS=0x%08x "
2682 "REG_WAKE_PKT_LEN=0x%08x\n",
2684 wake_info->wk_details,
2685 wake_info->wk_pkt_len);
2687 buf = (u32 *)wake_info->wk_buffer;
2689 for (i = 0; i < (wake_info->wk_pkt_len / sizeof(u32)); i++)
2691 if ((i * sizeof(u32)) >= sizeof(wake_info->wk_buffer))
2694 "ecore_get_wake_info: buffer index %d out of range\n",
2698 buf[i] = ecore_rd(hwfn, hwfn->p_main_ptt,
2699 reg_wake_buffer_offset + (i * sizeof(u32)));
2700 DP_INFO(p_dev, "ecore_get_wake_info: wk_buffer[%u]: 0x%08x\n",
2704 ecore_wol_buffer_clear(p_dev);
2706 return ECORE_SUCCESS;
2709 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
2710 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
2712 ecore_ptt_pool_free(p_hwfn);
2713 OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
2714 p_hwfn->hw_info.p_igu_info = OSAL_NULL;
2717 /* Setup bar access */
2718 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
2720 /* clear indirect access */
2721 if (ECORE_IS_AH(p_hwfn->p_dev) || ECORE_IS_E5(p_hwfn->p_dev)) {
2722 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2723 PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
2724 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2725 PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
2726 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2727 PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
2728 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2729 PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
2731 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2732 PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
2733 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2734 PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
2735 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2736 PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
2737 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2738 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
2741 /* Clean Previous errors if such exist */
2742 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2743 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
2744 1 << p_hwfn->abs_pf_id);
2746 /* enable internal target-read */
2747 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2748 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
2751 static void get_function_id(struct ecore_hwfn *p_hwfn)
2754 p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
2755 PXP_PF_ME_OPAQUE_ADDR);
2757 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
2759 /* Bits 16-19 from the ME registers are the pf_num */
2760 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
2761 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
2762 PXP_CONCRETE_FID_PFID);
2763 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
2764 PXP_CONCRETE_FID_PORT);
2766 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
2767 "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
2768 p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
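#if 0
/* Illustrative only - the GET_FIELD() extractions above follow the
 * usual ecore mask-and-shift convention; assuming <NAME>_SHIFT and
 * <NAME>_MASK pairs, the macro behaves roughly like:
 */
#define EXAMPLE_GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))
/* e.g. with name##_SHIFT == 16 and name##_MASK == 0xf, a value of
 * 0x00030000 yields 3 - matching the open-coded abs_pf_id extraction.
 */
#endif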
2771 void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
2773 u32 *feat_num = p_hwfn->hw_info.feat_num;
2774 struct ecore_sb_cnt_info sb_cnt;
2777 OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
2778 ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
2780 #ifdef CONFIG_ECORE_ROCE
2781 /* Each RoCE CNQ requires one status block and one CNQ. Divide the
2782 * status blocks equally between L2 and RoCE, with consideration as
2783 * to how many L2 queues / CNQs we have.
2785 if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
2788 feat_num[ECORE_RDMA_CNQ] =
2791 RESC_NUM(p_hwfn, ECORE_RDMA_CNQ_RAM));
2793 /* Upper layer might require less */
2794 max_cnqs = (u32)p_hwfn->pf_params.rdma_pf_params.max_cnqs;
2796 if (max_cnqs == ECORE_RDMA_PF_PARAMS_CNQS_NONE)
2798 feat_num[ECORE_RDMA_CNQ] =
2800 feat_num[ECORE_RDMA_CNQ],
2804 non_l2_sbs = feat_num[ECORE_RDMA_CNQ];
2808 /* Each L2 queue requires one status block and one L2 queue resource */
2809 if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
2810 /* Start by allocating VF queues, then PF's */
2811 feat_num[ECORE_VF_L2_QUE] =
2813 RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
2815 feat_num[ECORE_PF_L2_QUE] =
2817 sb_cnt.cnt - non_l2_sbs,
2818 RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
2819 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
2822 if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
2823 feat_num[ECORE_FCOE_CQ] =
2824 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
2827 if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
2828 feat_num[ECORE_ISCSI_CQ] =
2829 OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
2832 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
2833 "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
2834 (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
2835 (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
2836 (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
2837 (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
2838 (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
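/* Illustrative worked example of the budgeting above (numbers are
 * hypothetical): with 64 status blocks, 16 RDMA CNQs reserved
 * (non_l2_sbs = 16), 80 L2 queue resources of which 16 are granted to
 * VFs, the PF ends up with min(64 - 16, 80 - 16) = 48 PF L2 queues.
 */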
2842 const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
2845 case ECORE_L2_QUEUE:
2859 case ECORE_RDMA_CNQ_RAM:
2860 return "RDMA_CNQ_RAM";
2863 case ECORE_LL2_QUEUE:
2865 case ECORE_CMDQS_CQS:
2867 case ECORE_RDMA_STATS_QUEUE:
2868 return "RDMA_STATS_QUEUE";
2874 return "UNKNOWN_RESOURCE";
2878 static enum _ecore_status_t
2879 __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
2880 enum ecore_resources res_id, u32 resc_max_val,
2883 enum _ecore_status_t rc;
2885 rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
2886 resc_max_val, p_mcp_resp);
2887 if (rc != ECORE_SUCCESS) {
2888 DP_NOTICE(p_hwfn, true,
2889 "MFW response failure for a max value setting of resource %d [%s]\n",
2890 res_id, ecore_hw_get_resc_name(res_id));
2894 if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
2896 "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
2897 res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
2899 return ECORE_SUCCESS;
2902 static enum _ecore_status_t
2903 ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
2905 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
2906 u32 resc_max_val, mcp_resp;
2908 enum _ecore_status_t rc;
2910 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
2912 case ECORE_LL2_QUEUE:
2913 resc_max_val = MAX_NUM_LL2_RX_QUEUES;
2915 case ECORE_RDMA_CNQ_RAM:
2916 /* No need for a case for ECORE_CMDQS_CQS since
2917 * CNQ/CMDQS are the same resource.
2919 resc_max_val = NUM_OF_GLOBAL_QUEUES;
2921 case ECORE_RDMA_STATS_QUEUE:
2922 resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
2923 : RDMA_NUM_STATISTIC_COUNTERS_BB;
2926 resc_max_val = BDQ_NUM_RESOURCES;
2932 rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
2933 resc_max_val, &mcp_resp);
2934 if (rc != ECORE_SUCCESS)
2937 /* There's no point to continue to the next resource if the
2938 * command is not supported by the MFW.
2939 * We do continue if the command is supported but the resource
2940 * is unknown to the MFW. Such a resource will be later
2941 * configured with the default allocation values.
2943 if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
2944 return ECORE_NOTIMPL;
2947 return ECORE_SUCCESS;
2951 enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
2952 enum ecore_resources res_id,
2953 u32 *p_resc_num, u32 *p_resc_start)
2955 u8 num_funcs = p_hwfn->num_funcs_on_engine;
2956 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
2959 case ECORE_L2_QUEUE:
2960 *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
2961 MAX_NUM_L2_QUEUES_BB) / num_funcs;
2964 *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
2965 MAX_NUM_VPORTS_BB) / num_funcs;
2968 *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
2969 ETH_RSS_ENGINE_NUM_BB) / num_funcs;
2972 *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
2973 MAX_QM_TX_QUEUES_BB) / num_funcs;
2974 *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
2977 *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
2981 /* Each VFC resource can accommodate both a MAC and a VLAN */
2982 *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
2985 *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
2986 PXP_NUM_ILT_RECORDS_BB) / num_funcs;
2988 case ECORE_LL2_QUEUE:
2989 *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
2991 case ECORE_RDMA_CNQ_RAM:
2992 case ECORE_CMDQS_CQS:
2993 /* CNQ/CMDQS are the same resource */
2994 *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
2996 case ECORE_RDMA_STATS_QUEUE:
2997 *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
2998 RDMA_NUM_STATISTIC_COUNTERS_BB) /
3002 if (p_hwfn->hw_info.personality != ECORE_PCI_ISCSI &&
3003 p_hwfn->hw_info.personality != ECORE_PCI_FCOE)
3009 /* Since we want its value to reflect whether MFW supports
3010 * the new scheme, have a default of 0.
3022 else if (p_hwfn->p_dev->num_ports_in_engines == 4)
3023 *p_resc_start = p_hwfn->port_id;
3024 else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI)
3025 *p_resc_start = p_hwfn->port_id;
3026 else if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
3027 *p_resc_start = p_hwfn->port_id + 2;
3030 *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
3034 return ECORE_SUCCESS;
3037 static enum _ecore_status_t
3038 __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
3039 bool drv_resc_alloc)
3041 u32 dflt_resc_num = 0, dflt_resc_start = 0;
3042 u32 mcp_resp, *p_resc_num, *p_resc_start;
3043 enum _ecore_status_t rc;
3045 p_resc_num = &RESC_NUM(p_hwfn, res_id);
3046 p_resc_start = &RESC_START(p_hwfn, res_id);
3048 rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
3050 if (rc != ECORE_SUCCESS) {
3052 "Failed to get default amount for resource %d [%s]\n",
3053 res_id, ecore_hw_get_resc_name(res_id));
3058 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
3059 *p_resc_num = dflt_resc_num;
3060 *p_resc_start = dflt_resc_start;
3065 rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
3066 &mcp_resp, p_resc_num, p_resc_start);
3067 if (rc != ECORE_SUCCESS) {
3068 DP_NOTICE(p_hwfn, true,
3069 "MFW response failure for an allocation request for resource %d [%s]\n",
3070 res_id, ecore_hw_get_resc_name(res_id));
3074 /* Default driver values are applied in the following cases:
3075 * - The resource allocation MB command is not supported by the MFW
3076 * - There is an internal error in the MFW while processing the request
3077 * - The resource ID is unknown to the MFW
3079 if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3081 "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
3082 res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
3083 dflt_resc_num, dflt_resc_start);
3084 *p_resc_num = dflt_resc_num;
3085 *p_resc_start = dflt_resc_start;
3089 if ((*p_resc_num != dflt_resc_num ||
3090 *p_resc_start != dflt_resc_start) &&
3091 res_id != ECORE_SB) {
3093 "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
3094 res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
3095 *p_resc_start, dflt_resc_num, dflt_resc_start,
3096 drv_resc_alloc ? " - Applying default values" : "");
3097 if (drv_resc_alloc) {
3098 *p_resc_num = dflt_resc_num;
3099 *p_resc_start = dflt_resc_start;
3103 /* PQs have to be a multiple of 8 [that's the HW granularity].
3104 * Reduce the number so it fits.
3106 if ((res_id == ECORE_PQ) &&
3107 ((*p_resc_num % 8) || (*p_resc_start % 8))) {
3109 "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
3110 *p_resc_num, (*p_resc_num) & ~0x7,
3111 *p_resc_start, (*p_resc_start) & ~0x7);
3112 *p_resc_num &= ~0x7;
3113 *p_resc_start &= ~0x7;
3116 return ECORE_SUCCESS;
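/* Illustrative worked example of the PQ alignment above: an MFW grant
 * of 42 PQs starting at 29 (0x2a @ 0x1d) is trimmed to 40 PQs starting
 * at 24 (0x28 @ 0x18), both multiples of 8.
 */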
3119 static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
3120 bool drv_resc_alloc)
3122 enum _ecore_status_t rc;
3125 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
3126 rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
3127 if (rc != ECORE_SUCCESS)
3131 return ECORE_SUCCESS;
3134 #define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
3135 #define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
3137 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
3138 bool drv_resc_alloc)
3140 struct ecore_resc_unlock_params resc_unlock_params;
3141 struct ecore_resc_lock_params resc_lock_params;
3142 bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
3144 enum _ecore_status_t rc;
3146 u32 *resc_start = p_hwfn->hw_info.resc_start;
3147 u32 *resc_num = p_hwfn->hw_info.resc_num;
3148 /* For AH, an equal share of the ILT lines between the maximal number of
3149 * PFs is not enough for RoCE. This would be solved by the future
3150 * resource allocation scheme, but isn't currently present for
3151 * FPGA/emulation. For now we keep a number that is sufficient for RoCE
3152 * to work - the BB number of ILT lines divided by its max PFs number.
3154 u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
3157 /* Setting the max values of the soft resources and the following
3158 * resources allocation queries should be atomic. Since several PFs can
3159 * run in parallel - a resource lock is needed.
3160 * If either the resource lock or resource set value commands are not
3161 * supported - skip the max values setting, release the lock if
3162 * needed, and proceed to the queries. Other failures, including a
3163 * failure to acquire the lock, will cause this function to fail.
3164 * Old drivers that don't acquire the lock can run in parallel, and
3165 * their allocation values won't be affected by the updated max values.
3167 OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
3168 resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
3169 resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
3170 resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
3171 resc_lock_params.sleep_b4_retry = true;
3172 OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
3173 resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
3175 rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
3176 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
3178 } else if (rc == ECORE_NOTIMPL) {
3180 "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
3181 } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
3182 DP_NOTICE(p_hwfn, false,
3183 "Failed to acquire the resource lock for the resource allocation commands\n");
3186 rc = ecore_hw_set_soft_resc_size(p_hwfn);
3187 if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
3188 DP_NOTICE(p_hwfn, false,
3189 "Failed to set the max values of the soft resources\n");
3190 goto unlock_and_exit;
3191 } else if (rc == ECORE_NOTIMPL) {
3193 "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
3194 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
3195 &resc_unlock_params);
3196 if (rc != ECORE_SUCCESS)
3198 "Failed to release the resource lock for the resource allocation commands\n");
3202 rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
3203 if (rc != ECORE_SUCCESS)
3204 goto unlock_and_exit;
3206 if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
3207 rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
3208 &resc_unlock_params);
3209 if (rc != ECORE_SUCCESS)
3211 "Failed to release the resource lock for the resource allocation commands\n");
3215 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
3216 /* Reduced build contains fewer PQs */
3217 if (!(p_hwfn->p_dev->b_is_emul_full)) {
3218 resc_num[ECORE_PQ] = 32;
3219 resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
3220 p_hwfn->enabled_func_idx;
3223 /* For AH emulation, since up to 16 PFs can be enabled, in
3224 * case there are not enough ILT lines - allocate only the
3225 * first PF as RoCE and leave all the others ETH-only
3226 * with fewer ILT lines.
3228 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
3229 resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
3230 resc_num[ECORE_ILT],
3231 roce_min_ilt_lines);
3234 /* Correct the common ILT calculation if PF0 has more */
3235 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
3236 p_hwfn->p_dev->b_is_emul_full &&
3237 p_hwfn->rel_pf_id &&
3238 resc_num[ECORE_ILT] < roce_min_ilt_lines)
3239 resc_start[ECORE_ILT] += roce_min_ilt_lines -
3240 resc_num[ECORE_ILT];
3243 /* Sanity for ILT */
3244 if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
3245 (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
3246 DP_NOTICE(p_hwfn, true, "Can't assign ILT pages [%08x,...,%08x]\n",
3247 RESC_START(p_hwfn, ECORE_ILT),
3248 RESC_END(p_hwfn, ECORE_ILT) - 1);
3252 /* This will also learn the number of SBs from MFW */
3253 if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt))
3256 ecore_hw_set_feat(p_hwfn);
3258 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
3259 "The numbers for each resource are:\n");
3260 for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
3261 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
3262 ecore_hw_get_resc_name(res_id),
3263 RESC_NUM(p_hwfn, res_id),
3264 RESC_START(p_hwfn, res_id));
3266 return ECORE_SUCCESS;
3269 if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
3270 ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
3271 &resc_unlock_params);
3275 static enum _ecore_status_t
3276 ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
3277 struct ecore_ptt *p_ptt,
3278 struct ecore_hw_prepare_params *p_params)
3280 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
3281 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
3282 struct ecore_mcp_link_capabilities *p_caps;
3283 struct ecore_mcp_link_params *link;
3284 enum _ecore_status_t rc;
3286 /* Read global nvm_cfg address */
3287 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
3289 /* Verify MCP has initialized it */
3290 if (!nvm_cfg_addr) {
3291 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
3292 if (p_params->b_relaxed_probe)
3293 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
3297 /* Read nvm_cfg1 (notice this is just an offset, not the offsize) - TBD */
3298 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
3300 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3301 OFFSETOF(struct nvm_cfg1, glob) +
3302 OFFSETOF(struct nvm_cfg1_glob, core_cfg);
3304 core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
3306 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
3307 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
3308 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
3309 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
3311 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
3312 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
3314 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
3315 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
3317 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
3318 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
3320 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
3321 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
3323 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
3324 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
3326 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
3327 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
3329 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
3330 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
3332 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
3333 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
3335 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
3336 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
3338 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
3339 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
3342 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
3347 /* Read DCBX configuration */
3348 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3349 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
3350 dcbx_mode = ecore_rd(p_hwfn, p_ptt,
3352 OFFSETOF(struct nvm_cfg1_port, generic_cont0));
3353 dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
3354 >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
3355 switch (dcbx_mode) {
3356 case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
3357 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
3359 case NVM_CFG1_PORT_DCBX_MODE_CEE:
3360 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
3362 case NVM_CFG1_PORT_DCBX_MODE_IEEE:
3363 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
3366 p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
3369 /* Read default link configuration */
3370 link = &p_hwfn->mcp_info->link_input;
3371 p_caps = &p_hwfn->mcp_info->link_capabilities;
3372 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3373 OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
3374 link_temp = ecore_rd(p_hwfn, p_ptt,
3376 OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
3377 link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
3378 link->speed.advertised_speeds = link_temp;
3379 p_caps->speed_capabilities = link->speed.advertised_speeds;
3381 link_temp = ecore_rd(p_hwfn, p_ptt,
3383 OFFSETOF(struct nvm_cfg1_port, link_settings));
3384 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
3385 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
3386 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
3387 link->speed.autoneg = true;
3389 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
3390 link->speed.forced_speed = 1000;
3392 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
3393 link->speed.forced_speed = 10000;
3395 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
3396 link->speed.forced_speed = 25000;
3398 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
3399 link->speed.forced_speed = 40000;
3401 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
3402 link->speed.forced_speed = 50000;
3404 case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
3405 link->speed.forced_speed = 100000;
3408 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n",
3412 p_caps->default_speed = link->speed.forced_speed;
3413 p_caps->default_speed_autoneg = link->speed.autoneg;
3415 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
3416 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
3417 link->pause.autoneg = !!(link_temp &
3418 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
3419 link->pause.forced_rx = !!(link_temp &
3420 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
3421 link->pause.forced_tx = !!(link_temp &
3422 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
3423 link->loopback_mode = 0;
3425 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
3426 link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
3427 OFFSETOF(struct nvm_cfg1_port, ext_phy));
3428 link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
3429 link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
3430 p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
3431 link->eee.enable = true;
3432 switch (link_temp) {
3433 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
3434 p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
3435 link->eee.enable = false;
3437 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
3438 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
3440 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
3441 p_caps->eee_lpi_timer =
3442 EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
3444 case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
3445 p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
3448 link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
3449 link->eee.tx_lpi_enable = link->eee.enable;
3450 if (link->eee.enable)
3451 link->eee.adv_caps = ECORE_EEE_1G_ADV |
3454 p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
3457 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3458 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
3459 link->speed.forced_speed, link->speed.advertised_speeds,
3460 link->speed.autoneg, link->pause.autoneg,
3461 p_caps->default_eee, p_caps->eee_lpi_timer);
3463 /* Read Multi-function information from shmem */
3464 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3465 OFFSETOF(struct nvm_cfg1, glob) +
3466 OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
3468 generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
3470 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
3471 NVM_CFG1_GLOB_MF_MODE_OFFSET;
3474 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
3475 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
3477 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
3478 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
3480 case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
3481 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
3484 DP_INFO(p_hwfn, "Multi function mode is %08x\n",
3485 p_hwfn->p_dev->mf_mode);
3487 /* Read device capabilities information from shmem */
3488 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
3489 OFFSETOF(struct nvm_cfg1, glob) +
3490 OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
3492 device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
3493 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
3494 OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
3495 &p_hwfn->hw_info.device_capabilities);
3496 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
3497 OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
3498 &p_hwfn->hw_info.device_capabilities);
3499 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
3500 OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
3501 &p_hwfn->hw_info.device_capabilities);
3502 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
3503 OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
3504 &p_hwfn->hw_info.device_capabilities);
3505 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
3506 OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
3507 &p_hwfn->hw_info.device_capabilities);
3509 rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
3510 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
3512 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
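#if 0
/* Illustrative only - the nvm_cfg reads above all follow one pattern:
 * add the global nvm_cfg1 offset and the structure-relative offsets to
 * the MCP scratchpad base, then issue a 32-bit read. A helper sketch:
 */
static u32 example_read_nvm_glob(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u32 nvm_cfg1_offset, u32 field_offset)
{
	u32 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		   OFFSETOF(struct nvm_cfg1, glob) + field_offset;

	return ecore_rd(p_hwfn, p_ptt, addr);
}
#endif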
3518 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
3519 struct ecore_ptt *p_ptt)
3521 u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
3522 u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
3523 struct ecore_dev *p_dev = p_hwfn->p_dev;
3525 num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
3527 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
3528 * in the other bits are selected.
3529 * Bits 1-15 are for functions 1-15, respectively, and their value is
3530 * '0' only for enabled functions (function 0 always exists and
3532 * In case of CMT in BB, only the "even" functions are enabled, and thus
3533 * the number of functions for both hwfns is learnt from the same bits.
3535 if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) {
3536 reg_function_hide = ecore_rd(p_hwfn, p_ptt,
3537 MISCS_REG_FUNCTION_HIDE_BB_K2);
3539 reg_function_hide = 0;
3540 ECORE_E5_MISSING_CODE;
3543 if (reg_function_hide & 0x1) {
3544 if (ECORE_IS_BB(p_dev)) {
3545 if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
3557 /* Get the number of the enabled functions on the engine */
3558 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
3565 /* Get the PF index within the enabled functions */
3566 low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
3567 tmp = reg_function_hide & eng_mask & low_pfs_mask;
3575 p_hwfn->num_funcs_on_engine = num_funcs;
3576 p_hwfn->enabled_func_idx = enabled_func_idx;
3579 if (CHIP_REV_IS_FPGA(p_dev)) {
3580 DP_NOTICE(p_hwfn, false,
3581 "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
3582 p_hwfn->num_funcs_on_engine = 4;
3586 DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
3587 "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
3588 p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
3589 p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
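/* Illustrative note on the decoding above: in MISCS_REG_FUNCTION_HIDE a
 * '0' bit marks an enabled function, so XOR-ing with 0xffffffff and
 * masking with the engine mask leaves one set bit per enabled PF.
 * Masking the raw register with low_pfs_mask instead counts the hidden
 * functions below this PF, which yields its index among the enabled
 * ones. (Reading of the code; eng_mask values are chip-specific.)
 */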
3592 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
3593 struct ecore_ptt *p_ptt)
3598 /* Read the port mode */
3599 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
3601 else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
3602 (p_hwfn->p_dev->num_hwfns > 1))
3603 /* In CMT on emulation, assume 1 port */
3607 port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
3609 if (port_mode < 3) {
3610 p_hwfn->p_dev->num_ports_in_engines = 1;
3611 } else if (port_mode <= 5) {
3612 p_hwfn->p_dev->num_ports_in_engines = 2;
3614 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
3615 p_hwfn->p_dev->num_ports_in_engines);
3617 /* Default num_ports_in_engines to 1 */
3618 p_hwfn->p_dev->num_ports_in_engines = 1;
3622 static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
3623 struct ecore_ptt *p_ptt)
3628 p_hwfn->p_dev->num_ports_in_engines = 0;
3631 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
3632 port = ecore_rd(p_hwfn, p_ptt,
3633 MISCS_REG_ECO_RESERVED);
3634 switch ((port & 0xf000) >> 12) {
3636 p_hwfn->p_dev->num_ports_in_engines = 1;
3639 p_hwfn->p_dev->num_ports_in_engines = 2;
3642 p_hwfn->p_dev->num_ports_in_engines = 4;
3645 DP_NOTICE(p_hwfn, false,
3646 "Unknown port mode in ECO_RESERVED %08x\n",
3651 for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
3652 port = ecore_rd(p_hwfn, p_ptt,
3653 CNIG_REG_NIG_PORT0_CONF_K2_E5 + (i * 4));
3655 p_hwfn->p_dev->num_ports_in_engines++;
3659 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
3660 struct ecore_ptt *p_ptt)
3662 if (ECORE_IS_BB(p_hwfn->p_dev))
3663 ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
3665 ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
3668 static enum _ecore_status_t
3669 ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
3670 enum ecore_pci_personality personality,
3671 struct ecore_hw_prepare_params *p_params)
3673 bool drv_resc_alloc = p_params->drv_resc_alloc;
3674 enum _ecore_status_t rc;
3676 /* Since all information is common, only the first hwfn should do this */
3677 if (IS_LEAD_HWFN(p_hwfn)) {
3678 rc = ecore_iov_hw_info(p_hwfn);
3679 if (rc != ECORE_SUCCESS) {
3680 if (p_params->b_relaxed_probe)
3681 p_params->p_relaxed_res =
3682 ECORE_HW_PREPARE_BAD_IOV;
3688 /* TODO In get_hw_info, amongst others:
3689 * Get MCP FW revision and determine according to it the supported
3690 * features (e.g. DCB)
3692 * ecore_get_pcie_width_speed, WOL capability.
3693 * Number of global CQ-s (for storage
3695 ecore_hw_info_port_num(p_hwfn, p_ptt);
3697 ecore_mcp_get_capabilities(p_hwfn, p_ptt);
3700 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
3702 rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
3703 if (rc != ECORE_SUCCESS)
3709 rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
3710 if (rc != ECORE_SUCCESS) {
3711 if (p_params->b_relaxed_probe)
3712 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
3718 if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
3720 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
3721 p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
3724 static u8 mcp_hw_mac[6] = {0, 2, 3, 4, 5, 6};
3726 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
3727 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
3731 if (ecore_mcp_is_init(p_hwfn)) {
3732 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
3733 p_hwfn->hw_info.ovlan =
3734 p_hwfn->mcp_info->func_info.ovlan;
3736 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
3739 if (personality != ECORE_PCI_DEFAULT) {
3740 p_hwfn->hw_info.personality = personality;
3741 } else if (ecore_mcp_is_init(p_hwfn)) {
3742 enum ecore_pci_personality protocol;
3744 protocol = p_hwfn->mcp_info->func_info.protocol;
3745 p_hwfn->hw_info.personality = protocol;
3749 /* To overcome the ILT shortage on emulation, at least until we have
3750 * a definite answer from the system about it, allow only PF0 to be RoCE.
3752 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
3753 if (!p_hwfn->rel_pf_id)
3754 p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
3756 p_hwfn->hw_info.personality = ECORE_PCI_ETH;
3760 /* Although in BB some constellations may support more than 4 TCs,
3761 * that can result in a performance penalty in some cases. 4
3762 * represents a good tradeoff between performance and flexibility.
3764 p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
3766 /* start out with a single active tc. This can be increased either
3767 * by dcbx negotiation or by upper layer driver
3769 p_hwfn->hw_info.num_active_tc = 1;
3771 ecore_get_num_funcs(p_hwfn, p_ptt);
3773 if (ecore_mcp_is_init(p_hwfn))
3774 p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
3776 /* In case of forcing the driver's default resource allocation, calling
3777 * ecore_hw_get_resc() should come after initializing the personality
3778 * and after getting the number of functions, since the calculation of
3779 * the resources/features depends on them.
3780 * This order is not harmful if not forcing.
3782 rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
3783 if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
3785 p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
3791 static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
3793 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
3797 /* Read Vendor Id / Device Id */
3798 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
3800 OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
3803 /* Determine type */
3804 device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
3805 switch (device_id_mask) {
3806 case ECORE_DEV_ID_MASK_BB:
3807 p_dev->type = ECORE_DEV_TYPE_BB;
3809 case ECORE_DEV_ID_MASK_AH:
3810 p_dev->type = ECORE_DEV_TYPE_AH;
3813 DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
3815 return ECORE_ABORTED;
3818 p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
3819 MISCS_REG_CHIP_NUM);
3820 p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
3821 MISCS_REG_CHIP_REV);
3823 MASK_FIELD(CHIP_REV, p_dev->chip_rev);
3825 /* Learn number of HW-functions */
3826 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
3827 MISCS_REG_CMT_ENABLED_FOR_PAIR);
3829 if (tmp & (1 << p_hwfn->rel_pf_id)) {
3830 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
3831 p_dev->num_hwfns = 2;
3833 p_dev->num_hwfns = 1;
3837 if (CHIP_REV_IS_EMUL(p_dev)) {
3838 /* For some reason we have problems with this register
3839 * in B0 emulation; Simply assume no CMT
3841 DP_NOTICE(p_dev->hwfns, false, "device on emul - assume no CMT\n");
3842 p_dev->num_hwfns = 1;
3846 p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
3847 MISCS_REG_CHIP_TEST_REG) >> 4;
3848 MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
3849 p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
3850 MISCS_REG_CHIP_METAL);
3851 MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
3852 DP_INFO(p_dev->hwfns,
3853 "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
3854 ECORE_IS_BB(p_dev) ? "BB" : "AH",
3855 'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
3856 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
3859 if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
3860 DP_NOTICE(p_dev->hwfns, false,
3861 "The chip type/rev (BB A0) is not supported!\n");
3862 return ECORE_ABORTED;
3866 if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
3867 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
3868 MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
3870 if (CHIP_REV_IS_EMUL(p_dev)) {
3871 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
3872 MISCS_REG_ECO_RESERVED);
3873 if (tmp & (1 << 29)) {
3874 DP_NOTICE(p_hwfn, false, "Emulation: Running on a FULL build\n");
3875 p_dev->b_is_emul_full = true;
3877 DP_NOTICE(p_hwfn, false, "Emulation: Running on a REDUCED build\n");
3882 return ECORE_SUCCESS;
3885 void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev)
3892 for_each_hwfn(p_dev, j) {
3893 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
3895 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Mark hw/fw uninitialized\n");
3897 p_hwfn->hw_init_done = false;
3898 p_hwfn->first_on_engine = false;
3900 ecore_ptt_invalidate(p_hwfn);
3904 void ecore_hw_hibernate_resume(struct ecore_dev *p_dev)
3911 for_each_hwfn(p_dev, j) {
3912 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
3913 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
3915 ecore_hw_hwfn_prepare(p_hwfn);
3918 DP_NOTICE(p_hwfn, true, "ptt acquire failed\n");
3920 ecore_load_mcp_offsets(p_hwfn, p_ptt);
3921 ecore_ptt_release(p_hwfn, p_ptt);
3923 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "Reinitialized hw after low power state\n");
3927 static enum _ecore_status_t ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
3928 void OSAL_IOMEM *p_regview,
3929 void OSAL_IOMEM *p_doorbells,
3930 struct ecore_hw_prepare_params *p_params)
3932 struct ecore_mdump_retain_data mdump_retain;
3933 struct ecore_dev *p_dev = p_hwfn->p_dev;
3934 struct ecore_mdump_info mdump_info;
3935 enum _ecore_status_t rc = ECORE_SUCCESS;
3937 /* Split PCI bars evenly between hwfns */
3938 p_hwfn->regview = p_regview;
3939 p_hwfn->doorbells = p_doorbells;
3942 return ecore_vf_hw_prepare(p_hwfn);
3944 /* Validate that chip access is feasible */
3945 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
3946 DP_ERR(p_hwfn, "Reading the ME register returns all Fs; Preventing further chip access\n");
3947 if (p_params->b_relaxed_probe)
3948 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
3952 get_function_id(p_hwfn);
3954 /* Allocate PTT pool */
3955 rc = ecore_ptt_pool_alloc(p_hwfn);
3957 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
3958 if (p_params->b_relaxed_probe)
3959 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
3963 /* Allocate the main PTT */
3964 p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
3966 /* First hwfn learns basic information, e.g., number of hwfns */
3967 if (!p_hwfn->my_id) {
3968 rc = ecore_get_dev_info(p_dev);
3969 if (rc != ECORE_SUCCESS) {
3970 if (p_params->b_relaxed_probe)
3971 p_params->p_relaxed_res =
3972 ECORE_HW_PREPARE_FAILED_DEV;
3977 ecore_hw_hwfn_prepare(p_hwfn);
3979 /* Initialize MCP structure */
3980 rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
3982 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
3983 if (p_params->b_relaxed_probe)
3984 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
3988 /* Read the device configuration information from the HW and SHMEM */
3989 rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
3990 p_params->personality, p_params);
3992 DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
3996 /* Sending a mailbox to the MFW should be done after ecore_get_hw_info()
3997 * is called, since among other things it sets the number of ports in an engine.
3999 if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
4000 !p_dev->recov_in_prog) {
4001 rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
4002 if (rc != ECORE_SUCCESS)
4003 DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
4006 /* Check if mdump logs/data are present and update the epoch value */
4007 if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
4008 rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
4010 if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
4011 DP_NOTICE(p_hwfn, false,
4012 "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
4014 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
4016 if (rc == ECORE_SUCCESS && mdump_retain.valid)
4017 DP_NOTICE(p_hwfn, false,
4018 "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
4019 mdump_retain.epoch, mdump_retain.pf,
4020 mdump_retain.status);
4022 ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
4026 /* Allocate the init RT array and initialize the init-ops engine */
4027 rc = ecore_init_alloc(p_hwfn);
4029 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
4030 if (p_params->b_relaxed_probe)
4031 p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
4036 if (CHIP_REV_IS_FPGA(p_dev)) {
4037 DP_NOTICE(p_hwfn, false,
4038 "FPGA: workaround; Prevent DMAE parities\n");
4039 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
4042 DP_NOTICE(p_hwfn, false,
4043 "FPGA: workaround: Set VF bar0 size\n");
4044 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
4045 PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
4051 if (IS_LEAD_HWFN(p_hwfn))
4052 ecore_iov_free_hw_info(p_dev);
4053 ecore_mcp_free(p_hwfn);
4055 ecore_hw_hwfn_free(p_hwfn);
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
				      struct ecore_hw_prepare_params *p_params)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	enum _ecore_status_t rc;

	p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
	p_dev->allow_mdump = p_params->allow_mdump;

	if (p_params->b_relaxed_probe)
		p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;

	/* Store the precompiled init data ptrs */
	if (IS_PF(p_dev))
		ecore_init_iro_array(p_dev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
				     p_dev->doorbells, p_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_params->personality = p_hwfn->hw_info.personality;

	/* Initialize the second hwfn if necessary */
	if (p_dev->num_hwfns > 1) {
		void OSAL_IOMEM *p_regview, *p_doorbell;
		u8 OSAL_IOMEM *addr;
		/* adjust bar offset for second engine */
		addr = (u8 OSAL_IOMEM *)p_dev->regview +
		       ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
		p_regview = (void OSAL_IOMEM *)addr;

		addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
		       ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
		p_doorbell = (void OSAL_IOMEM *)addr;
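
		/* Illustrative example (the sizes are hypothetical): if
		 * BAR0 is 32MB and BAR1 is 1MB, ecore_hw_bar_size() returns
		 * those sizes, so hwfn[1] gets regview at regview + 16MB and
		 * doorbells at doorbells + 512KB - i.e. the upper half of
		 * each bar - while hwfn[0] keeps the lower half.
		 */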
		/* prepare second hw function */
		rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
					     p_doorbell, p_params);

		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc != ECORE_SUCCESS) {
			if (p_params->b_relaxed_probe)
				p_params->p_relaxed_res =
						ECORE_HW_PREPARE_FAILED_ENG2;

			if (IS_PF(p_dev)) {
				ecore_init_free(p_hwfn);
				ecore_mcp_free(p_hwfn);
				ecore_hw_hwfn_free(p_hwfn);
			} else {
				DP_NOTICE(p_dev, true,
					  "What do we need to free when VF hwfn1 init fails\n");
			}
			return rc;
		}
	}

	return rc;
}
void ecore_hw_remove(struct ecore_dev *p_dev)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	int i;

	if (IS_PF(p_dev))
		ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
					ECORE_OV_DRIVER_STATE_NOT_LOADED);

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (IS_VF(p_dev)) {
			ecore_vf_pf_release(p_hwfn);
			continue;
		}

		ecore_init_free(p_hwfn);
		ecore_hw_hwfn_free(p_hwfn);
		ecore_mcp_free(p_hwfn);

		OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
	}

	ecore_iov_free_hw_info(p_dev);
}
static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
				      struct ecore_chain *p_chain)
{
	void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
	struct ecore_chain_next *p_next;
	u32 size, i;

	if (!p_virt)
		return;

	size = p_chain->elem_size * p_chain->usable_per_page;

	for (i = 0; i < p_chain->page_cnt; i++) {
		if (!p_virt)
			break;

		p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
		p_virt_next = p_next->next_virt;
		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

		OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
				       ECORE_CHAIN_PAGE_SIZE);

		p_virt = p_virt_next;
		p_phys = p_phys_next;
	}
}
static void ecore_chain_free_single(struct ecore_dev *p_dev,
				    struct ecore_chain *p_chain)
{
	if (!p_chain->p_virt_addr)
		return;

	OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
			       p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
}
static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
				 struct ecore_chain *p_chain)
{
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;

	if (!pp_virt_addr_tbl)
		return;

	if (!p_pbl_virt)
		goto out;

	for (i = 0; i < page_cnt; i++) {
		if (!pp_virt_addr_tbl[i])
			break;

		OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
				       *(dma_addr_t *)p_pbl_virt,
				       ECORE_CHAIN_PAGE_SIZE);

		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
	}

	pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;

	if (!p_chain->b_external_pbl) {
		OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
				       p_chain->pbl_sp.p_phys_table, pbl_size);
	}
out:
	OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
	p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
}
void ecore_chain_free(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain)
{
	switch (p_chain->mode) {
	case ECORE_CHAIN_MODE_NEXT_PTR:
		ecore_chain_free_next_ptr(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_SINGLE:
		ecore_chain_free_single(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_PBL:
		ecore_chain_free_pbl(p_dev, p_chain);
		break;
	}
}
static enum _ecore_status_t
ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
			       enum ecore_chain_cnt_type cnt_type,
			       osal_size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of u32 type.
	 */
	if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
	     chain_size > ((u32)ECORE_U16_MAX + 1)) ||
	    (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
	     chain_size > ECORE_U32_MAX)) {
		DP_NOTICE(p_dev, true,
			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
			  (unsigned long long)chain_size);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
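
/* Worked example of the check above (illustrative, assuming a 4KB
 * ECORE_CHAIN_PAGE_SIZE): with elem_size 8, ELEMS_PER_PAGE yields 512
 * elements per page. A u16 chain of 129 pages would give
 * chain_size = 129 * 512 = 66048, which exceeds U16_MAX + 1 = 65536,
 * so the allocation is rejected.
 */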
static enum _ecore_status_t
ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
{
	void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
	dma_addr_t p_phys = 0;
	u32 i;

	for (i = 0; i < p_chain->page_cnt; i++) {
		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
						 ECORE_CHAIN_PAGE_SIZE);
		if (!p_virt) {
			DP_NOTICE(p_dev, true,
				  "Failed to allocate chain memory\n");
			return ECORE_NOMEM;
		}

		if (i == 0) {
			ecore_chain_init_mem(p_chain, p_virt, p_phys);
			ecore_chain_reset(p_chain);
		} else {
			ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
						       p_virt, p_phys);
		}

		p_virt_prev = p_virt;
	}

	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
				       p_chain->p_virt_addr,
				       p_chain->p_phys_addr);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
{
	dma_addr_t p_phys = 0;
	void *p_virt = OSAL_NULL;

	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
	if (!p_virt) {
		DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
		return ECORE_NOMEM;
	}

	ecore_chain_init_mem(p_chain, p_virt, p_phys);
	ecore_chain_reset(p_chain);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
		      struct ecore_chain *p_chain,
		      struct ecore_chain_ext_pbl *ext_pbl)
{
	void *p_virt = OSAL_NULL;
	u8 *p_pbl_virt = OSAL_NULL;
	void **pp_virt_addr_tbl = OSAL_NULL;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	u32 page_cnt = p_chain->page_cnt, size, i;

	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
	if (!pp_virt_addr_tbl) {
		DP_NOTICE(p_dev, true,
			  "Failed to allocate memory for the chain virtual addresses table\n");
		return ECORE_NOMEM;
	}

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
	 * ecore_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since pp_virt_addr_tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
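	/* For instance (illustrative sizes): with page_cnt = 4 and an
	 * 8-byte ECORE_CHAIN_PBL_ENTRY_SIZE, the PBL is a single 32-byte
	 * contiguous DMA buffer holding the physical address of each of
	 * the 4 chain pages, in order.
	 */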
	size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;

	if (ext_pbl == OSAL_NULL) {
		p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
	} else {
		p_pbl_virt = ext_pbl->p_pbl_virt;
		p_pbl_phys = ext_pbl->p_pbl_phys;
		p_chain->b_external_pbl = true;
	}

	ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
				 pp_virt_addr_tbl);
	if (!p_pbl_virt) {
		DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
		return ECORE_NOMEM;
	}

	for (i = 0; i < page_cnt; i++) {
		p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
						 ECORE_CHAIN_PAGE_SIZE);
		if (!p_virt) {
			DP_NOTICE(p_dev, true,
				  "Failed to allocate chain memory\n");
			return ECORE_NOMEM;
		}

		if (i == 0) {
			ecore_chain_init_mem(p_chain, p_virt, p_phys);
			ecore_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;

		p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
				       enum ecore_chain_use_mode intended_use,
				       enum ecore_chain_mode mode,
				       enum ecore_chain_cnt_type cnt_type,
				       u32 num_elems, osal_size_t elem_size,
				       struct ecore_chain *p_chain,
				       struct ecore_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (mode == ECORE_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
					    page_cnt);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_dev, true,
			  "Cannot allocate a chain with the given arguments:\n"
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
				mode, cnt_type, p_dev->dp_ctx);

	switch (mode) {
	case ECORE_CHAIN_MODE_NEXT_PTR:
		rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_SINGLE:
		rc = ecore_chain_alloc_single(p_dev, p_chain);
		break;
	case ECORE_CHAIN_MODE_PBL:
		rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
		break;
	}
	if (rc != ECORE_SUCCESS)
		goto nomem;

	return ECORE_SUCCESS;

nomem:
	ecore_chain_free(p_dev, p_chain);
	return rc;
}
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
				       u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
		u16 min, max;

		min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
		DP_NOTICE(p_hwfn, true,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return ECORE_INVAL;
	}

	*dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;

	return ECORE_SUCCESS;
}
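
/* Example of the relative-to-absolute mapping above (resource values are
 * illustrative): if RESC_START(ECORE_L2_QUEUE) is 16 and
 * RESC_NUM(ECORE_L2_QUEUE) is 8, then src_id 0..7 maps to absolute queue
 * ids 16..23, and any src_id >= 8 is rejected. The vport and rss_eng
 * helpers below follow the same scheme.
 */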
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
				    u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
		max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
		DP_NOTICE(p_hwfn, true,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return ECORE_INVAL;
	}

	*dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
				      u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
		DP_NOTICE(p_hwfn, true,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return ECORE_INVAL;
	}

	*dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u32 high, u32 low,
			       u32 *p_entry_num)
{
	u32 en;
	int i;

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			      i * sizeof(u32));
		if (en)
			continue;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), low);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), high);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			 i * sizeof(u32), 1);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_NORESOURCES;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_llh_add_mac_filter_e5(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			    u32 high, u32 low, u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt, u8 *p_filter)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return ECORE_SUCCESS;

	high = p_filter[1] | (p_filter[0] << 8);
	low = p_filter[5] | (p_filter[4] << 8) |
	      (p_filter[3] << 16) | (p_filter[2] << 24);
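
	/* Packing example: for MAC aa:bb:cc:dd:ee:ff the register pair
	 * becomes high = 0x0000aabb (the two most significant MAC bytes)
	 * and low = 0xccddeeff (the remaining four bytes).
	 */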
	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
						    &entry_num);
	else
		rc = ecore_llh_add_mac_filter_e5(p_hwfn, p_ptt, high, low,
						 &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to find an empty LLH filter to utilize\n");
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
		   p_filter[0], p_filter[1], p_filter[2], p_filter[3],
		   p_filter[4], p_filter[5], entry_num);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt, u32 high, u32 low,
				  u32 *p_entry_num)
{
	int i;

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     2 * i * sizeof(u32)) != low)
			continue;
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     (2 * i + 1) * sizeof(u32)) != high)
			continue;

		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), 0);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_INVAL;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_llh_remove_mac_filter_e5(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u32 high, u32 low,
			       u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt, u8 *p_filter)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	high = p_filter[1] | (p_filter[0] << 8);
	low = p_filter[5] | (p_filter[4] << 8) |
	      (p_filter[3] << 16) | (p_filter[2] << 24);

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
						       low, &entry_num);
	else
		rc = ecore_llh_remove_mac_filter_e5(p_hwfn, p_ptt, high, low,
						    &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Tried to remove a non-configured filter [MAC %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx]\n",
			  p_filter[0], p_filter[1], p_filter[2], p_filter[3],
			  p_filter[4], p_filter[5]);
		return;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
		   p_filter[0], p_filter[1], p_filter[2], p_filter[3],
		   p_filter[4], p_filter[5], entry_num);
}
static enum _ecore_status_t
ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    enum ecore_llh_port_filter_type_t type,
				    u32 high, u32 low, u32 *p_entry_num)
{
	u32 en;
	int i;

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			      i * sizeof(u32));
		if (en)
			continue;
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), low);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), high);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			 i * sizeof(u32), 1);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			 i * sizeof(u32), 1 << type);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_NORESOURCES;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_llh_add_protocol_filter_e5(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 enum ecore_llh_port_filter_type_t type,
				 u32 high, u32 low, u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u16 source_port_or_eth_type,
			      u16 dest_port,
			      enum ecore_llh_port_filter_type_t type)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return ECORE_SUCCESS;

	high = 0;
	low = 0;

	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Invalid LLH protocol filter type %d\n", type);
		return ECORE_INVAL;
	}
	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
							 high, low, &entry_num);
	else
		rc = ecore_llh_add_protocol_filter_e5(p_hwfn, p_ptt, type, high,
						      low, &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to find an empty LLH filter to utilize\n");
		return rc;
	}

	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "ETH type %x is added at %d\n",
			   source_port_or_eth_type, entry_num);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "TCP src port %x is added at %d\n",
			   source_port_or_eth_type, entry_num);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "UDP src port %x is added at %d\n",
			   source_port_or_eth_type, entry_num);
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "TCP dst port %x is added at %d\n",
			   dest_port, entry_num);
		break;
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "UDP dst port %x is added at %d\n",
			   dest_port, entry_num);
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "TCP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, entry_num);
		break;
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "UDP src/dst ports %x/%x are added at %d\n",
			   source_port_or_eth_type, dest_port, entry_num);
		break;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       enum ecore_llh_port_filter_type_t type,
				       u32 high, u32 low, u32 *p_entry_num)
{
	int i;

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			      i * sizeof(u32)))
			continue;
		if (!ecore_rd(p_hwfn, p_ptt,
			      NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			      i * sizeof(u32)))
			continue;
		if (!(ecore_rd(p_hwfn, p_ptt,
			       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			       i * sizeof(u32)) & (1 << type)))
			continue;
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     2 * i * sizeof(u32)) != low)
			continue;
		if (ecore_rd(p_hwfn, p_ptt,
			     NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			     (2 * i + 1) * sizeof(u32)) != high)
			continue;

		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), 0);
		break;
	}

	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		return ECORE_INVAL;

	*p_entry_num = i;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_llh_remove_protocol_filter_e5(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    enum ecore_llh_port_filter_type_t type,
				    u32 high, u32 low, u32 *p_entry_num)
{
	ECORE_E5_MISSING_CODE;

	return ECORE_NOTIMPL;
}
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 source_port_or_eth_type,
				 u16 dest_port,
				 enum ecore_llh_port_filter_type_t type)
{
	u32 high, low, entry_num;
	enum _ecore_status_t rc;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	high = 0;
	low = 0;

	switch (type) {
	case ECORE_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case ECORE_LLH_FILTER_TCP_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Invalid LLH protocol filter type %d\n", type);
		return;
	}

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
							    high, low,
							    &entry_num);
	else
		rc = ecore_llh_remove_protocol_filter_e5(p_hwfn, p_ptt, type,
							 high, low, &entry_num);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
			  type, source_port_or_eth_type, dest_port);
		return;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
		   type, source_port_or_eth_type, dest_port, entry_num);
}
static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	int i;

	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
			 i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 2 * i * sizeof(u32), 0);
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
			 (2 * i + 1) * sizeof(u32), 0);
	}
}
static void ecore_llh_clear_all_filters_e5(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	ECORE_E5_MISSING_CODE;
}
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
		ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
	else
		ecore_llh_clear_all_filters_e5(p_hwfn, p_ptt);
}
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
			 1 << (p_hwfn->abs_pf_id / 2));
		ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
		return ECORE_SUCCESS;
	} else {
		DP_NOTICE(p_hwfn, false,
			  "This function can't be set as default\n");
		return ECORE_INVAL;
	}
}
static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       u32 hw_addr, void *p_eth_qzone,
					       osal_size_t eth_qzone_size,
					       u8 timeset)
{
	struct coalescing_timeset *p_coal_timeset;

	if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
		DP_NOTICE(p_hwfn, true,
			  "Coalescing configuration not enabled\n");
		return ECORE_INVAL;
	}

	p_coal_timeset = p_eth_qzone;
	OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
	ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);

	return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
					      u16 rx_coal, u16 tx_coal,
					      void *p_handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_ptt *p_ptt;

	/* TODO - Configuring a single queue's coalescing but
	 * claiming all queues are abiding same configuration
	 * for PF and VF both.
	 */

#ifdef CONFIG_ECORE_SRIOV
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
						tx_coal, p_cid);
#endif /* #ifdef CONFIG_ECORE_SRIOV */

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	if (rx_coal) {
		rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc != ECORE_SUCCESS)
			goto out;
		p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
	}

	if (tx_coal) {
		rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
		if (rc != ECORE_SUCCESS)
			goto out;
		p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
	}
out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u16 coalesce,
					    struct ecore_queue_cid *p_cid)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	enum _ecore_status_t rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F)
		timer_res = 0;
	else if (coalesce <= 0xFF)
		timer_res = 1;
	else if (coalesce <= 0x1FF)
		timer_res = 2;
	else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return ECORE_INVAL;
	}
	timeset = (u8)(coalesce >> timer_res);
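
	/* For example: a requested coalescing of 200 usec does not fit in
	 * the 7-bit timeset, so timer_res becomes 1 and
	 * timeset = 200 >> 1 = 100; the hardware then reconstructs
	 * timeset << timer_res = 200.
	 */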
	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				     p_cid->sb_igu_id, false);
	if (rc != ECORE_SUCCESS)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);

	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
				sizeof(struct ustorm_eth_queue_zone), timeset);
out:
	return rc;
}
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u16 coalesce,
					    struct ecore_queue_cid *p_cid)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	enum _ecore_status_t rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
	if (coalesce <= 0x7F)
		timer_res = 0;
	else if (coalesce <= 0xFF)
		timer_res = 1;
	else if (coalesce <= 0x1FF)
		timer_res = 2;
	else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return ECORE_INVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				     p_cid->sb_igu_id, true);
	if (rc != ECORE_SUCCESS)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);

	rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
				sizeof(struct xstorm_eth_queue_zone), timeset);
out:
	return rc;
}
/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
 */
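/* Numeric example (values are illustrative): with min_pf_rate 10000 Mbps
 * and ECORE_WFQ_UNIT 100, a vport whose min_speed is 2500 Mbps gets
 * vport_wfq = (2500 * 100) / 10000 = 25, i.e. roughly 25% of the PF
 * min rate.
 */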
static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
					    min_pf_rate;
		ecore_init_vport_wfq(p_hwfn, p_ptt,
				     vport_params[i].first_tx_pq_id,
				     vport_params[i].vport_wfq);
	}
}
static void
ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}
static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
		ecore_init_vport_wfq(p_hwfn, p_ptt,
				     vport_params[i].first_tx_pq_id,
				     vport_params[i].vport_wfq);
	}
}
/* This function performs several validations of the WFQ
 * configuration and of the required min rate for a given vport:
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not explicitly configured
 *    for WFQ] to drop below one percent of min_pf_rate.
 * 3. total_req_min_rate [the sum of all vports' min rates] shouldn't
 *    exceed min_pf_rate.
 */
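/* Example check (illustrative numbers): with min_pf_rate 10000 Mbps and
 * ECORE_WFQ_UNIT 100, each vport must end up with at least 100 Mbps. A
 * request of 9000 Mbps on one vport leaves 1000 Mbps for the remaining
 * vports, so the request is rejected as soon as more than 10
 * unconfigured vports would fall below the 1% floor.
 */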
static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
						 u16 vport_id, u32 req_rate,
						 u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	/* validate possible error cases */
	if (req_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return ECORE_INVAL;
	}

	if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return ECORE_INVAL;
	}

	/* TBD - for number of vports greater than 100 */
	if (num_vports > ECORE_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   ECORE_WFQ_UNIT);
		return ECORE_INVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return ECORE_INVAL;
	}

	/* Data left for non requested vports */
	total_left_rate = min_pf_rate - total_req_min_rate;
	left_rate_per_vp = total_left_rate / non_requested_count;

	/* validate if non requested get < 1% of min bw */
	if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return ECORE_INVAL;
	}

	/* Now req_rate for the given vport passes all scenarios.
	 * Assign final wfq rates to all vports.
	 */
	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return ECORE_SUCCESS;
}
static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u16 vp_id, u32 rate)
{
	struct ecore_mcp_link_state *p_link;
	int rc = ECORE_SUCCESS;

	p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (rc == ECORE_SUCCESS)
		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						   p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn, false,
			  "Validation failed while configuring min rate\n");

	return rc;
}
static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = ECORE_SUCCESS;
	u16 i;

	/* Validate all pre configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (rc == ECORE_SUCCESS && use_wfq)
		ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}
/* Main API for ecore clients to configure vport min rate.
 * vp_id - vport id in PF Range [0 - (total_num_vports_per_pf - 1)]
 * rate - Speed in Mbps to be assigned to a given vport.
 */
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
{
	int i, rc = ECORE_INVAL;

	/* TBD - for multiple hardware functions - that is 100 gig */
	if (p_dev->num_hwfns > 1) {
		DP_NOTICE(p_dev, false,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_ptt *p_ptt;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
/* API to configure WFQ from mcp link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
					   struct ecore_ptt *p_ptt,
					   u32 min_pf_rate)
{
	int i;

	/* TBD - for multiple hardware functions - that is 100 gig */
	if (p_dev->num_hwfns > 1) {
		DP_VERBOSE(p_dev, ECORE_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		__ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
							min_pf_rate);
	}
}
int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 max_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set limit to imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			      p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}
/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
{
	int i, rc = ECORE_INVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
							p_link, max_bw);

		ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}
int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_mcp_link_state *p_link,
				       u8 min_bw)
{
	int rc = ECORE_SUCCESS;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}
/* Main API to configure PF min bandwidth where bw range is [1-100] */
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
{
	int i, rc = ECORE_INVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
		struct ecore_mcp_link_state *p_link;
		struct ecore_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = ecore_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return ECORE_TIMEOUT;

		rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
							p_link, min_bw);
		if (rc != ECORE_SUCCESS) {
			ecore_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
								     p_ptt,
								     min_rate);
		}

		ecore_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);

	OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
		    sizeof(*p_hwfn->qm_info.wfq_data) *
		    p_hwfn->qm_info.num_vports);
}
int ecore_device_num_engines(struct ecore_dev *p_dev)
{
	return ECORE_IS_BB(p_dev) ? 2 : 1;
}

int ecore_device_num_ports(struct ecore_dev *p_dev)
{
	/* in CMT there is always only one port */
	if (p_dev->num_hwfns > 1)
		return 1;

	return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
}
void ecore_set_fw_mac_addr(__le16 *fw_msb,
			   __le16 *fw_mid,
			   __le16 *fw_lsb,
			   u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}
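
/* Byte-order example: for mac aa:bb:cc:dd:ee:ff the bytes stored at
 * fw_msb are {0xbb, 0xaa}, at fw_mid {0xdd, 0xcc} and at fw_lsb
 * {0xff, 0xee}; read as little-endian 16-bit values these are 0xaabb,
 * 0xccdd and 0xeeff respectively, i.e. each __le16 carries one MAC byte
 * pair in network order.
 */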