2 ********************************************************************************
3 Copyright (C) 2015 Annapurna Labs Ltd.
5 This file may be licensed under the terms of the Annapurna Labs Commercial
8 Alternatively, this file can be distributed under the terms of the GNU General
9 Public License V2 as published by the Free Software Foundation and can be
10 found at http://www.gnu.org/licenses/gpl-2.0.html
12 Alternatively, redistribution and use in source and binary forms, with or
13 without modification, are permitted provided that the following conditions are
16 * Redistributions of source code must retain the above copyright notice,
17 this list of conditions and the following disclaimer.
19 * Redistributions in binary form must reproduce the above copyright
20 notice, this list of conditions and the following disclaimer in
21 the documentation and/or other materials provided with the
24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
25 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
28 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
31 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *******************************************************************************/
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
44 #include "al_hal_pcie.h"
45 #include "al_hal_pbs_regs.h"
46 #include "al_hal_unit_adapter_regs.h"
49 * Parameter definitions
51 #define AL_PCIE_AXI_REGS_OFFSET 0x0
/* LTSSM state codes as reported in the wrapper debug register (L0 = link up,
 * L0s = low-power idle substate). */
53 #define AL_PCIE_LTSSM_STATE_L0 0x11
54 #define AL_PCIE_LTSSM_STATE_L0S 0x12
/* Device Control register MPS field encodings (128B / 256B payload). */
55 #define AL_PCIE_DEVCTL_PAYLOAD_128B 0x00
56 #define AL_PCIE_DEVCTL_PAYLOAD_256B 0x20
58 #define AL_PCIE_SECBUS_DEFAULT 0x1
59 #define AL_PCIE_SUBBUS_DEFAULT 0x1
60 #define AL_PCIE_LINKUP_WAIT_INTERVAL 50 /* measured in usec */
61 #define AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC 20
63 #define AL_PCIE_LINKUP_RETRIES 8
/* BAR sizing limits: a 32-bit memory BAR may cover at most 4 GiB; memory
 * BARs must be at least 4 KiB and IO BARs at least 256 B. */
65 #define AL_PCIE_MAX_32_MEMORY_BAR_SIZE (0x100000000ULL)
66 #define AL_PCIE_MIN_MEMORY_BAR_SIZE (1 << 12)
67 #define AL_PCIE_MIN_IO_BAR_SIZE (1 << 8)
70 * inbound header credits and outstanding outbound reads defaults
72 /** RC - Revisions 1/2 */
73 #define AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT (8)
74 #define AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT (41)
75 #define AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT (25)
76 #define AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT (31)
77 /** EP - Revisions 1/2 */
78 #define AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT (15)
79 #define AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT (76)
80 #define AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT (6)
81 #define AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT (15)
82 /** RC - Revision 3 */
83 #define AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT (32)
84 #define AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT (161)
85 #define AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT (38)
86 #define AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT (60)
87 /** EP - Revision 3 */
88 #define AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT (32)
89 #define AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT (161)
90 #define AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT (38)
91 #define AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT (60)
/* Build the active-lanes bit mask for the global conf register:
 * ((1 << v) - 1) set bits, shifted into the NOF_ACT_LANES field. */
96 #define AL_PCIE_PARSE_LANES(v) (((1 << v) - 1) << \
97 PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT)
/*
 * Enable/disable writes to read-only (RO) config-space registers via the
 * port's rd_only_wr_en register. Memory barriers order the toggle against
 * surrounding config-space writes (before disabling, after enabling).
 */
103 al_pcie_port_wr_to_ro_set(struct al_pcie_port *pcie_port, al_bool enable)
105 /* when disabling writes to RO, make sure any previous writes to
106 * config space were committed
108 if (enable == AL_FALSE)
109 al_local_data_memory_barrier();
111 al_reg_write32(&pcie_port->regs->port_regs->rd_only_wr_en,
112 (enable == AL_TRUE) ? 1 : 0);
114 /* when enabling writes to RO, make sure it is committed before trying
115 * to write to RO config space
117 if (enable == AL_TRUE)
118 al_local_data_memory_barrier();
121 /** helper function to access dbi_cs2 registers */
/* Writes 'val' to 'offset' with the DBI CS2 select bit OR'ed into the
 * address: 0x4000 on rev 3, 0x1000 on earlier revisions. Used for shadow
 * registers such as BAR masks. */
123 al_reg_write32_dbi_cs2(
124 struct al_pcie_port *pcie_port,
129 (pcie_port->rev_id == AL_PCIE_REV_ID_3) ? 0x4000 : 0x1000;
131 al_reg_write32((uint32_t *)((uintptr_t)offset | cs2_bit), val);
/* Map an al_pcie_link_speed enum value to its PCIe generation code
 * (GEN1/GEN2/GEN3); the trailing branch is unreachable for valid input. */
135 al_pcie_speed_gen_code(enum al_pcie_link_speed speed)
137 if (speed == AL_PCIE_LINK_SPEED_GEN1)
139 if (speed == AL_PCIE_LINK_SPEED_GEN2)
141 if (speed == AL_PCIE_LINK_SPEED_GEN3)
143 /* must not be reached */
/*
 * Program the port's maximum link speed: updates the (normally read-only)
 * Link Capabilities register and the Link Control 2 Target Link Speed field
 * for PF0. RO-write access is opened before, and closed after, the update.
 * AL_PCIE_LINK_SPEED_DEFAULT leaves the hardware default untouched.
 */
148 al_pcie_port_link_speed_ctrl_set(
149 struct al_pcie_port *pcie_port,
150 enum al_pcie_link_speed max_speed)
152 struct al_pcie_regs *regs = pcie_port->regs;
154 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
156 if (max_speed != AL_PCIE_LINK_SPEED_DEFAULT) {
157 uint16_t max_speed_val = (uint16_t)al_pcie_speed_gen_code(max_speed);
158 al_reg_write32_masked(
159 (uint32_t __iomem *)(regs->core_space[0].pcie_link_cap_base),
161 al_reg_write32_masked(
162 (uint32_t __iomem *)(regs->core_space[0].pcie_cap_base
163 + (AL_PCI_EXP_LNKCTL2 >> 2)),
167 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
/*
 * Apply link parameters to a port: validates the requested Max Payload Size
 * (only DEFAULT/128/256 accepted), sets the max link speed, programs MPS for
 * PF0, and writes the lane-count fields in gen2_ctrl and port_link_ctrl.
 * Lane reversal is not implemented and is rejected with an error.
 * NOTE(review): the '®s' sequences below look like mojibake for '&regs' —
 * confirm against the pristine source before building.
 */
171 al_pcie_port_link_config(
172 struct al_pcie_port *pcie_port,
173 const struct al_pcie_link_params *link_params)
175 struct al_pcie_regs *regs = pcie_port->regs;
176 uint8_t max_lanes = pcie_port->max_lanes;
178 if ((link_params->max_payload_size != AL_PCIE_MPS_DEFAULT) &&
179 (link_params->max_payload_size != AL_PCIE_MPS_128) &&
180 (link_params->max_payload_size != AL_PCIE_MPS_256)) {
181 al_err("PCIe %d: unsupported Max Payload Size (%u)\n",
182 pcie_port->port_id, link_params->max_payload_size);
186 al_dbg("PCIe %d: link config: max speed gen %d, max lanes %d, reversal %s\n",
187 pcie_port->port_id, link_params->max_speed,
188 pcie_port->max_lanes, link_params->enable_reversal? "enable" : "disable");
190 al_pcie_port_link_speed_ctrl_set(pcie_port, link_params->max_speed);
192 /* Change Max Payload Size, if needed.
193 * The Max Payload Size is only valid for PF0.
195 if (link_params->max_payload_size != AL_PCIE_MPS_DEFAULT)
196 al_reg_write32_masked(regs->core_space[0].pcie_dev_ctrl_status,
197 PCIE_PORT_DEV_CTRL_STATUS_MPS_MASK,
198 link_params->max_payload_size <<
199 PCIE_PORT_DEV_CTRL_STATUS_MPS_SHIFT);
201 /** Snap from PCIe core spec:
202 * Link Mode Enable. Sets the number of lanes in the link that you want
203 * to connect to the link partner. When you have unused lanes in your
204 * system, then you must change the value in this register to reflect
205 * the number of lanes. You must also change the value in the
206 * "Predetermined Number of Lanes" field of the "Link Width and Speed
207 * Change Control Register".
213 * 111111: x32 (not supported)
215 al_reg_write32_masked(®s->port_regs->gen2_ctrl,
216 PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_MASK,
217 max_lanes << PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_SHIFT);
/* max_lanes + (max_lanes-1) == 2n-1, i.e. the all-ones LINK_CAPABLE
 * encoding (x1=1, x2=3, x4=7, x8=15) for power-of-two lane counts. */
218 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
219 PCIE_PORT_LINK_CTRL_LINK_CAPABLE_MASK,
220 (max_lanes + (max_lanes-1))
221 << PCIE_PORT_LINK_CTRL_LINK_CAPABLE_SHIFT);
223 /* TODO: add support for reversal mode */
224 if (link_params->enable_reversal) {
225 al_err("PCIe %d: enabling reversal mode not implemented\n",
/*
 * Enable/disable core-RAM parity error generation and the matching
 * group-B interrupt mask bit. When disabled, the cause bit is masked
 * (mask register set) so no interrupt is raised.
 */
233 al_pcie_port_ram_parity_int_config(
234 struct al_pcie_port *pcie_port,
237 struct al_pcie_regs *regs = pcie_port->regs;
239 al_reg_write32(®s->app.parity->en_core,
240 (enable == AL_TRUE) ? 0xffffffff : 0x0);
242 al_reg_write32_masked(®s->app.int_grp_b->mask,
243 PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE,
244 (enable != AL_TRUE) ?
245 PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE : 0);
/*
 * Enable/disable AXI-side parity checking: programs the per-memory parity
 * enable mask, the revision-specific global parity calc/error-enable bits,
 * and the group-A interrupt mask for the AXI parity cause bits.
 * Note: even when disabled, slave parity calculation stays enabled
 * (PARITY_CALC_EN_SLV remains set in both revision branches).
 */
250 al_pcie_port_axi_parity_int_config(
251 struct al_pcie_port *pcie_port,
254 struct al_pcie_regs *regs = pcie_port->regs;
255 uint32_t parity_enable_mask = 0xffffffff;
258 * Addressing RMN: 5603
261 * u4_ram2p signal false parity error
264 * Disable parity check for this memory
/* Hardware erratum workaround: rev >= 3 must not check parity on the
 * u4_ram2p memory (false positives, see RMN 5603 above). */
266 if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
267 parity_enable_mask &= ~PCIE_AXI_PARITY_EN_AXI_U4_RAM2P;
269 al_reg_write32(regs->axi.parity.en_axi,
270 (enable == AL_TRUE) ? parity_enable_mask : 0x0);
272 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
273 al_reg_write32_masked(regs->axi.ctrl.global,
274 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
275 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
276 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
277 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
278 (enable == AL_TRUE) ?
279 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
280 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
281 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
282 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
283 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
285 al_reg_write32_masked(regs->axi.ctrl.global,
286 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
287 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
288 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
289 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
290 (enable == AL_TRUE) ?
291 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
292 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
293 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
294 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
295 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
/* Mask (set) the parity cause bits when disabling, unmask when enabling. */
298 al_reg_write32_masked(®s->axi.int_grp_a->mask,
299 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
300 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
301 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
302 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
303 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI,
304 (enable != AL_TRUE) ?
305 (PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
306 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
307 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
308 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
309 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI) : 0);
/*
 * Configure PCIe relaxed ordering for the port. Defaults depend on the
 * operating mode (RC: RX-only relaxed ordering; EP: TX-only); an explicit
 * relaxed_ordering_params pointer overrides both directions. The chosen
 * flags are folded into a single masked write to the POS control register.
 */
313 al_pcie_port_relaxed_pcie_ordering_config(
314 struct al_pcie_port *pcie_port,
315 struct al_pcie_relaxed_ordering_params *relaxed_ordering_params)
317 struct al_pcie_regs *regs = pcie_port->regs;
318 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
321 * - RC: Rx relaxed ordering only
322 * - EP: TX relaxed ordering only
324 al_bool tx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_FALSE : AL_TRUE);
325 al_bool rx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_TRUE : AL_FALSE);
/* Caller-supplied parameters (if any) override the mode-based defaults. */
327 if (relaxed_ordering_params) {
328 tx_relaxed_ordering = relaxed_ordering_params->enable_tx_relaxed_ordering;
329 rx_relaxed_ordering = relaxed_ordering_params->enable_rx_relaxed_ordering;
333 * - disable outbound completion must be stalled behind outbound write
334 * ordering rule enforcement is disabled for root-port
335 * - disables read completion on the master port push slave writes for end-point
337 al_reg_write32_masked(
338 regs->axi.ordering.pos_cntl,
339 PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
340 PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
341 PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS |
342 PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES,
343 (tx_relaxed_ordering ?
344 (PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
345 PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES) : 0) |
346 (rx_relaxed_ordering ?
347 (PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
348 PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS) : 0));
/*
 * NOTE(review): the function signature line was lost in this listing —
 * presumably this is the revision-ID probe (al_pcie_rev_id_get); verify
 * against the pristine source. It derives the PCIe IP revision: on Alpine
 * the revision comes from the PBS chip_id register; on Peakrock it is
 * inferred from the AXI device-ID register (X4 -> rev 2, X8 -> rev 3).
 * Unknown chip/device IDs produce a warning.
 */
353 void __iomem *pbs_reg_base,
354 void __iomem *pcie_reg_base)
357 uint16_t chip_id_dev;
359 struct al_pbs_regs *pbs_regs = pbs_reg_base;
361 /* get revision ID from PBS' chip_id register */
362 chip_id = al_reg_read32(&pbs_regs->unit.chip_id);
363 chip_id_dev = AL_REG_FIELD_GET(chip_id,
364 PBS_UNIT_CHIP_ID_DEV_ID_MASK,
365 PBS_UNIT_CHIP_ID_DEV_ID_SHIFT);
367 if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE) {
368 rev_id = AL_REG_FIELD_GET(
370 PBS_UNIT_CHIP_ID_DEV_REV_ID_MASK,
371 PBS_UNIT_CHIP_ID_DEV_REV_ID_SHIFT);
372 } else if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_PEAKROCK) {
373 struct al_pcie_revx_regs __iomem *regs =
374 (struct al_pcie_revx_regs __iomem *)pcie_reg_base;
377 dev_id = al_reg_read32(®s->axi.device_id.device_rev_id) &
378 PCIE_AXI_DEVICE_ID_REG_DEV_ID_MASK;
379 if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X4) {
380 rev_id = AL_PCIE_REV_ID_2;
381 } else if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X8) {
382 rev_id = AL_PCIE_REV_ID_3;
384 al_warn("%s: Revision ID is unknown\n",
389 al_warn("%s: Revision ID is unknown\n",
/*
 * Program the ACK latency (low 16 bits) and replay (high 16 bits) timer
 * limits into the port's ack_lat_rply_timer register.
 */
397 al_pcie_port_lat_rply_timers_config(
398 struct al_pcie_port *pcie_port,
399 const struct al_pcie_latency_replay_timers *lat_rply_timers)
401 struct al_pcie_regs *regs = pcie_port->regs;
404 AL_REG_FIELD_SET(reg, 0xFFFF, 0, lat_rply_timers->round_trip_lat_limit);
405 AL_REG_FIELD_SET(reg, 0xFFFF0000, 16, lat_rply_timers->replay_timer_limit);
407 al_reg_write32(®s->port_regs->ack_lat_rply_timer, reg);
/*
 * Apply the default inbound header-credit and outstanding-outbound-read
 * configuration, selected by operating mode (RC/EP) and IP revision
 * (rev 3 vs rev 1/2), using the AL_PCIE_REV_* default macros above, then
 * delegate to al_pcie_port_ib_hcrd_os_ob_reads_config(). Unknown operating
 * modes are reported as an error.
 */
412 al_pcie_ib_hcrd_os_ob_reads_config_default(
413 struct al_pcie_port *pcie_port)
416 struct al_pcie_ib_hcrd_os_ob_reads_config ib_hcrd_os_ob_reads_config;
418 switch (al_pcie_operating_mode_get(pcie_port)) {
419 case AL_PCIE_OPERATING_MODE_RC:
420 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
421 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
422 AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT;
423 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
424 AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT;
425 ib_hcrd_os_ob_reads_config.nof_np_hdr =
426 AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT;
427 ib_hcrd_os_ob_reads_config.nof_p_hdr =
428 AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT;
430 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
431 AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT;
432 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
433 AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT;
434 ib_hcrd_os_ob_reads_config.nof_np_hdr =
435 AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT;
436 ib_hcrd_os_ob_reads_config.nof_p_hdr =
437 AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT;
441 case AL_PCIE_OPERATING_MODE_EP:
442 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
443 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
444 AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT;
445 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
446 AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT;
447 ib_hcrd_os_ob_reads_config.nof_np_hdr =
448 AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT;
449 ib_hcrd_os_ob_reads_config.nof_p_hdr =
450 AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT;
452 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
453 AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT;
454 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
455 AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT;
456 ib_hcrd_os_ob_reads_config.nof_np_hdr =
457 AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT;
458 ib_hcrd_os_ob_reads_config.nof_p_hdr =
459 AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT;
464 al_err("PCIe %d: outstanding outbound transactions could not be configured - unknown operating mode\n",
469 al_pcie_port_ib_hcrd_os_ob_reads_config(pcie_port, &ib_hcrd_os_ob_reads_config);
472 /** return AL_TRUE is link started (LTSSM enabled) and AL_FALSE otherwise */
/* Reads the APP_LTSSM_EN field from the wrapper's port_init register;
 * a non-zero field means link training has been started. */
474 al_pcie_is_link_started(struct al_pcie_port *pcie_port)
476 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
478 uint32_t port_init = al_reg_read32(regs->app.global_ctrl.port_init);
479 uint8_t ltssm_en = AL_REG_FIELD_GET(port_init,
480 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
481 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_SHIFT);
486 /** return AL_TRUE if link is up, AL_FALSE otherwise */
/* Samples the LTSSM state from debug info_0; optionally returns the raw
 * state via ltssm_ret. The link counts as "up" only in L0 or L0s. */
489 struct al_pcie_port *pcie_port,
492 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
496 info_0 = al_reg_read32(®s->app.debug->info_0);
498 ltssm_state = AL_REG_FIELD_GET(info_0,
499 PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK,
500 PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT);
502 al_dbg("PCIe %d: Port Debug 0: 0x%08x. LTSSM state :0x%x\n",
503 pcie_port->port_id, info_0, ltssm_state);
506 *ltssm_ret = ltssm_state;
508 if ((ltssm_state == AL_PCIE_LTSSM_STATE_L0) ||
509 (ltssm_state == AL_PCIE_LTSSM_STATE_L0S))
/*
 * Apply Gen2 link parameters with a read-modify-write of the gen2_ctrl
 * register: TX swing (low/full), TX compliance-receive enable, and the
 * selectable de-emphasis bit.
 */
515 al_pcie_port_gen2_params_config(struct al_pcie_port *pcie_port,
516 const struct al_pcie_gen2_params *gen2_params)
518 struct al_pcie_regs *regs = pcie_port->regs;
521 al_dbg("PCIe %d: Gen2 params config: Tx Swing %s, interrupt on link Eq %s, set Deemphasis %s\n",
523 gen2_params->tx_swing_low ? "Low" : "Full",
524 gen2_params->tx_compliance_receive_enable? "enable" : "disable",
525 gen2_params->set_deemphasis? "enable" : "disable");
527 gen2_ctrl = al_reg_read32(®s->port_regs->gen2_ctrl);
529 if (gen2_params->tx_swing_low)
530 AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
532 AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
534 if (gen2_params->tx_compliance_receive_enable)
535 AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
537 AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
539 if (gen2_params->set_deemphasis)
540 AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
542 AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
544 al_reg_write32(®s->port_regs->gen2_ctrl, gen2_ctrl);
/*
 * Pack one lane's Gen3 equalization parameters into the 16-bit Lane
 * Equalization Control layout: [3:0] downstream TX preset, [6:4]
 * downstream RX preset hint, [11:8] upstream TX preset, [14:12]
 * upstream RX preset hint.
 */
551 gen3_lane_eq_param_to_val(const struct al_pcie_gen3_lane_eq_params *eq_params)
553 uint16_t eq_control = 0;
555 eq_control = eq_params->downstream_port_transmitter_preset & 0xF;
556 eq_control |= (eq_params->downstream_port_receiver_preset_hint & 0x7) << 4;
557 eq_control |= (eq_params->upstream_port_transmitter_preset & 0xF) << 8;
558 eq_control |= (eq_params->upstream_port_receiver_preset_hint & 0x7) << 12;
/*
 * Apply Gen3 link parameters: equalization enable/interrupt bits in the
 * Secondary PCIe extended capability, per-lane EQ presets (two 16-bit lane
 * entries packed per 32-bit write, RO-write window held open around the
 * loop), gen3_ctrl EQ-disable bits, FS/LF values in both the port
 * gen3_eq_fs_lf register and the per-lane AXI zero_lane registers (lanes
 * 4-7 exist on rev 3 only), and finally the Gen3 EQ control register.
 */
564 al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port,
565 const struct al_pcie_gen3_params *gen3_params)
567 struct al_pcie_regs *regs = pcie_port->regs;
569 uint16_t __iomem *lanes_eq_base = (uint16_t __iomem *)(regs->core_space[0].pcie_sec_ext_cap_base + (0xC >> 2));
572 al_dbg("PCIe %d: Gen3 params config: Equalization %s, interrupt on link Eq %s\n",
574 gen3_params->perform_eq ? "enable" : "disable",
575 gen3_params->interrupt_enable_on_link_eq_request? "enable" : "disable");
577 if (gen3_params->perform_eq)
578 AL_REG_BIT_SET(reg, 0);
579 if (gen3_params->interrupt_enable_on_link_eq_request)
580 AL_REG_BIT_SET(reg, 1);
582 al_reg_write32(regs->core_space[0].pcie_sec_ext_cap_base + (4 >> 2),
585 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
/* Each iteration packs lanes i and i+1 into one 32-bit EQ control word.
 * NOTE(review): assumes eq_params_elements is even — confirm at callers. */
587 for (i = 0; i < gen3_params->eq_params_elements; i += 2) {
588 uint32_t eq_control =
589 (uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i) |
590 (uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i + 1) << 16;
592 al_dbg("PCIe %d: Set EQ (0x%08x) for lane %d, %d\n", pcie_port->port_id, eq_control, i, i + 1);
593 al_reg_write32((uint32_t *)(lanes_eq_base + i), eq_control);
596 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
598 reg = al_reg_read32(®s->port_regs->gen3_ctrl);
599 if (gen3_params->eq_disable)
600 AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
602 AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
604 if (gen3_params->eq_phase2_3_disable)
605 AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
607 AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
609 al_reg_write32(®s->port_regs->gen3_ctrl, reg);
612 AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_LF_MASK,
613 PCIE_PORT_GEN3_EQ_LF_SHIFT,
614 gen3_params->local_lf);
615 AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_FS_MASK,
616 PCIE_PORT_GEN3_EQ_FS_SHIFT,
617 gen3_params->local_fs);
619 al_reg_write32(®s->port_regs->gen3_eq_fs_lf, reg);
622 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK,
623 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT,
624 gen3_params->local_lf);
625 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK,
626 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT,
627 gen3_params->local_fs);
628 al_reg_write32(regs->axi.conf.zero_lane0, reg);
629 al_reg_write32(regs->axi.conf.zero_lane1, reg);
630 al_reg_write32(regs->axi.conf.zero_lane2, reg);
631 al_reg_write32(regs->axi.conf.zero_lane3, reg);
632 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
633 al_reg_write32(regs->axi.conf.zero_lane4, reg);
634 al_reg_write32(regs->axi.conf.zero_lane5, reg);
635 al_reg_write32(regs->axi.conf.zero_lane6, reg);
636 al_reg_write32(regs->axi.conf.zero_lane7, reg);
640 * Gen3 EQ Control Register:
641 * - Preset Request Vector - request 9
642 * - Behavior After 24 ms Timeout (when optimal settings are not
643 * found): Recovery.Equalization.RcvrLock
644 * - Phase2_3 2 ms Timeout Disable
645 * - Feedback Mode - Figure Of Merit
648 al_reg_write32(®s->port_regs->gen3_eq_ctrl, reg);
/*
 * Stub: transport-layer credit configuration is not implemented; always
 * reports an error. The tl_credits parameter is intentionally unused.
 */
654 al_pcie_port_tl_credits_config(
655 struct al_pcie_port *pcie_port,
656 const struct al_pcie_tl_credits_params *tl_credits __attribute__((__unused__)))
658 al_err("PCIe %d: transport layer credits config not implemented\n",
/*
 * Configure a physical function (PF): optionally strip D1/D3hot, FLR and
 * ASPM capabilities; validate and program the six BARs (size, memory/IO,
 * 64-bit, prefetchable) through the DBI CS2 shadow registers; handle the
 * expansion ROM BAR (rev 3 only); and open CPU-generated MSI/legacy
 * interrupts in the wrapper, with a rev-0 erratum workaround (RMN 1547)
 * that saves/restores the group-A interrupt mask around the MSI write.
 * RO-write access is held open for the whole body.
 */
666 al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
667 const struct al_pcie_pf_config_params *pf_params)
669 struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
670 struct al_pcie_regs *regs = pcie_port->regs;
671 unsigned int pf_num = pcie_pf->pf_num;
675 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
677 /* Disable D1 and D3hot capabilities */
678 if (pf_params->cap_d1_d3hot_dis)
679 al_reg_write32_masked(
680 regs->core_space[pf_num].pcie_pm_cap_base,
681 AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0);
683 /* Disable FLR capability */
684 if (pf_params->cap_flr_dis)
685 al_reg_write32_masked(
686 regs->core_space[pf_num].pcie_dev_cap_base,
689 /* Disable ASPM capability */
690 if (pf_params->cap_aspm_dis) {
691 al_reg_write32_masked(
692 regs->core_space[pf_num].pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2),
693 AL_PCI_EXP_LNKCAP_ASPMS, 0);
694 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
695 al_warn("%s: ASPM support is enabled, please disable it\n",
701 if (!pf_params->bar_params_valid) {
/* BAR programming loop: a 64-bit memory BAR consumes two BAR slots, so
 * bar_idx advances by 1 or 2 depending on the BAR type (see loop tail). */
706 for (bar_idx = 0; bar_idx < 6;){ /* bar_idx will be incremented depending on bar type */
707 const struct al_pcie_ep_bar_params *params = pf_params->bar_params + bar_idx;
710 uint32_t __iomem *bar_addr = ®s->core_space[pf_num].config_header[(AL_PCI_BASE_ADDRESS_0 >> 2) + bar_idx];
712 if (params->enable) {
713 uint64_t size = params->size;
715 if (params->memory_64_bit) {
716 const struct al_pcie_ep_bar_params *next_params = params + 1;
717 /* 64 bars start at even index (BAR0, BAR 2 or BAR 4) */
723 /* next BAR must be disabled */
724 if (next_params->enable) {
729 /* 64 bar must be memory bar */
730 if (!params->memory_space) {
735 if (size > AL_PCIE_MAX_32_MEMORY_BAR_SIZE)
737 /* 32 bit space can't be prefetchable */
738 if (params->memory_is_prefetchable) {
744 if (params->memory_space) {
745 if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
746 al_err("PCIe %d: memory BAR %d: size (0x%llx) less that minimal allowed value\n",
747 pcie_port->port_id, bar_idx, size);
752 /* IO can't be prefetchable */
753 if (params->memory_is_prefetchable) {
758 if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
759 al_err("PCIe %d: IO BAR %d: size (0x%llx) less that minimal allowed value\n",
760 pcie_port->port_id, bar_idx, size);
766 /* size must be power of 2 */
767 if (size & (size - 1)) {
768 al_err("PCIe %d: BAR %d:size (0x%llx) must be "
770 pcie_port->port_id, bar_idx, size);
775 /* If BAR is 64-bit, disable the next BAR before
776 * configuring this one
778 if (params->memory_64_bit)
779 al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, 0);
/* BAR mask written via the CS2 shadow: bit 0 enables the BAR, the
 * remaining bits encode (size - 1). */
781 mask = 1; /* enable bit*/
782 mask |= (params->size - 1) & 0xFFFFFFFF;
784 al_reg_write32_dbi_cs2(pcie_port, bar_addr , mask);
786 if (params->memory_space == AL_FALSE)
787 ctrl = AL_PCI_BASE_ADDRESS_SPACE_IO;
788 if (params->memory_64_bit)
789 ctrl |= AL_PCI_BASE_ADDRESS_MEM_TYPE_64;
790 if (params->memory_is_prefetchable)
791 ctrl |= AL_PCI_BASE_ADDRESS_MEM_PREFETCH;
792 al_reg_write32(bar_addr, ctrl);
794 if (params->memory_64_bit) {
795 mask = ((params->size - 1) >> 32) & 0xFFFFFFFF;
796 al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, mask);
800 al_reg_write32_dbi_cs2(pcie_port, bar_addr , mask);
802 if (params->enable && params->memory_64_bit)
808 if (pf_params->exp_bar_params.enable) {
809 if (pcie_port->rev_id != AL_PCIE_REV_ID_3) {
810 al_err("PCIe %d: Expansion BAR enable not supported\n", pcie_port->port_id);
815 uint32_t __iomem *exp_rom_bar_addr =
816 ®s->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
817 uint32_t mask = 1; /* enable bit*/
818 mask |= (pf_params->exp_bar_params.size - 1) & 0xFFFFFFFF;
819 al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr , mask);
821 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
822 /* Disable exp ROM */
823 uint32_t __iomem *exp_rom_bar_addr =
824 ®s->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
825 al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr , 0);
828 /* Open CPU generated msi and legacy interrupts in pcie wrapper logic */
829 if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
830 (pcie_port->rev_id == AL_PCIE_REV_ID_1)) {
831 al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_0, (1 << 21));
832 } else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
833 (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
834 al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_3, (1 << 18));
842 * Addressing RMN: 1547
845 * 1. Whenever writing to 0x2xx offset, the write also happens to
846 * 0x3xx address, meaning two registers are written instead of one.
847 * 2. Read and write from 0x3xx work ok.
850 * Backup the value of the app.int_grp_a.mask_a register, because
851 * app.int_grp_a.mask_clear_a gets overwritten during the write to
852 * app.soc.mask_msi_leg_0 register.
853 * Restore the original value after the write to app.soc.mask_msi_leg_0
856 if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
859 backup = al_reg_read32(®s->app.int_grp_a->mask);
860 al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
861 al_reg_write32(®s->app.int_grp_a->mask, backup);
862 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
863 al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
864 } else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
865 (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
866 al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_3, (1 << 19));
876 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
/*
 * Toggle optional wrapper features — currently only the SATA-EP MSI fix
 * bit in the ctrl_gen features register. Asserts that the port is not
 * rev 0 (feature register not available there).
 */
882 al_pcie_port_features_config(
883 struct al_pcie_port *pcie_port,
884 const struct al_pcie_features *features)
886 struct al_pcie_regs *regs = pcie_port->regs;
888 al_assert(pcie_port->rev_id > AL_PCIE_REV_ID_0);
890 al_reg_write32_masked(
891 ®s->app.ctrl_gen->features,
892 PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX,
893 features->sata_ep_msi_fix ?
894 PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX : 0);
/*
 * Configure SRIS (Separate Reference clock with Independent Spread) SKP
 * counters. With use_defaults, fills in the default Gen2.1 counter and —
 * for rev > 1 — the Gen3 counter. Rev 2/3 program the wrapper's
 * sris_kp_counter register; rev 1 supports Gen2.1 only (rejects a Gen3
 * request) via the port filter_mask_reg_1; earlier revisions are rejected.
 */
898 al_pcie_port_sris_config(
899 struct al_pcie_port *pcie_port,
900 struct al_pcie_sris_params *sris_params,
901 enum al_pcie_link_speed link_speed)
904 struct al_pcie_regs *regs = pcie_port->regs;
906 if (sris_params->use_defaults) {
907 sris_params->kp_counter_gen3 = (pcie_port->rev_id > AL_PCIE_REV_ID_1) ?
908 PCIE_SRIS_KP_COUNTER_GEN3_DEFAULT_VAL : 0;
909 sris_params->kp_counter_gen21 = PCIE_SRIS_KP_COUNTER_GEN21_DEFAULT_VAL;
911 al_dbg("PCIe %d: configuring SRIS with default values kp_gen3[%d] kp_gen21[%d]\n",
913 sris_params->kp_counter_gen3,
914 sris_params->kp_counter_gen21);
917 switch (pcie_port->rev_id) {
918 case AL_PCIE_REV_ID_3:
919 case AL_PCIE_REV_ID_2:
920 al_reg_write32_masked(regs->app.global_ctrl.sris_kp_counter,
921 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_MASK |
922 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_MASK |
923 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN,
924 (sris_params->kp_counter_gen3 <<
925 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_SHIFT) |
926 (sris_params->kp_counter_gen21 <<
927 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_SHIFT) |
928 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN);
931 case AL_PCIE_REV_ID_1:
932 if ((link_speed == AL_PCIE_LINK_SPEED_GEN3) && (sris_params->kp_counter_gen3)) {
933 al_err("PCIe %d: cannot config Gen%d SRIS with rev_id[%d]\n",
934 pcie_port->port_id, al_pcie_speed_gen_code(link_speed),
939 al_reg_write32_masked(®s->port_regs->filter_mask_reg_1,
940 PCIE_FLT_MASK_SKP_INT_VAL_MASK,
941 sris_params->kp_counter_gen21);
945 al_err("PCIe %d: SRIS config is not supported in rev_id[%d]\n",
946 pcie_port->port_id, pcie_port->rev_id);
/*
 * Program the VC0 inbound header-credit counts from the port's cached
 * ib_hcrd_config: posted and non-posted receive-queue credits, each
 * written as (count - 1) into its RADM field.
 */
955 al_pcie_port_ib_hcrd_config(struct al_pcie_port *pcie_port)
957 struct al_pcie_regs *regs = pcie_port->regs;
959 al_reg_write32_masked(
960 ®s->port_regs->vc0_posted_rcv_q_ctrl,
961 RADM_PQ_HCRD_VC0_MASK,
962 (pcie_port->ib_hcrd_config.nof_p_hdr - 1)
963 << RADM_PQ_HCRD_VC0_SHIFT);
965 al_reg_write32_masked(
966 ®s->port_regs->vc0_non_posted_rcv_q_ctrl,
967 RADM_NPQ_HCRD_VC0_MASK,
968 (pcie_port->ib_hcrd_config.nof_np_hdr - 1)
969 << RADM_NPQ_HCRD_VC0_SHIFT);
/*
 * Return the configured number of physical functions. Only on rev 3 with
 * the port already enabled is the value read back from hardware
 * (max-function-number field + 1); otherwise the default of 1 PF applies
 * (returned after this excerpt).
 */
973 al_pcie_port_max_num_of_pfs_get(struct al_pcie_port *pcie_port)
975 struct al_pcie_regs *regs = pcie_port->regs;
976 uint32_t max_func_num;
977 uint32_t max_num_of_pfs;
980 * Only in REV3, when port is already enabled, max_num_of_pfs is already
981 * initialized, return it. Otherwise, return default: 1 PF
983 if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
984 && al_pcie_port_is_enabled(pcie_port)) {
985 max_func_num = al_reg_read32(®s->port_regs->timer_ctrl_max_func_num);
986 max_num_of_pfs = AL_REG_FIELD_GET(max_func_num, PCIE_PORT_GEN3_MAX_FUNC_NUM, 0) + 1;
987 return max_num_of_pfs;
992 /******************************************************************************/
993 /***************************** API Implementation *****************************/
994 /******************************************************************************/
996 /*************************** PCIe Initialization API **************************/
999 * Initializes a PCIe port handle structure
1000 * Caution: this function should not read/write to any register except for
1001 * reading RO register (REV_ID for example)
1004 al_pcie_port_handle_init(
1005 struct al_pcie_port *pcie_port,
1006 void __iomem *pcie_reg_base,
1007 void __iomem *pbs_reg_base,
1008 unsigned int port_id)
1012 pcie_port->pcie_reg_base = pcie_reg_base;
1013 pcie_port->regs = &pcie_port->regs_ptrs;
1014 pcie_port->ex_regs = NULL;
1015 pcie_port->pbs_regs = pbs_reg_base;
1016 pcie_port->port_id = port_id;
1017 pcie_port->max_lanes = 0;
1019 ret = al_pcie_rev_id_get(pbs_reg_base, pcie_reg_base);
1023 pcie_port->rev_id = ret;
1026 al_memset(pcie_port->regs, 0, sizeof(struct al_pcie_regs));
1028 if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
1029 (pcie_port->rev_id == AL_PCIE_REV_ID_1)) {
1030 struct al_pcie_rev1_regs __iomem *regs =
1031 (struct al_pcie_rev1_regs __iomem *)pcie_reg_base;
1033 pcie_port->regs->axi.ctrl.global = ®s->axi.ctrl.global;
1034 pcie_port->regs->axi.ctrl.master_arctl = ®s->axi.ctrl.master_arctl;
1035 pcie_port->regs->axi.ctrl.master_awctl = ®s->axi.ctrl.master_awctl;
1036 pcie_port->regs->axi.ctrl.slv_ctl = ®s->axi.ctrl.slv_ctl;
1037 pcie_port->regs->axi.ob_ctrl.cfg_target_bus = ®s->axi.ob_ctrl.cfg_target_bus;
1038 pcie_port->regs->axi.ob_ctrl.cfg_control = ®s->axi.ob_ctrl.cfg_control;
1039 pcie_port->regs->axi.ob_ctrl.io_start_l = ®s->axi.ob_ctrl.io_start_l;
1040 pcie_port->regs->axi.ob_ctrl.io_start_h = ®s->axi.ob_ctrl.io_start_h;
1041 pcie_port->regs->axi.ob_ctrl.io_limit_l = ®s->axi.ob_ctrl.io_limit_l;
1042 pcie_port->regs->axi.ob_ctrl.io_limit_h = ®s->axi.ob_ctrl.io_limit_h;
1043 pcie_port->regs->axi.pcie_global.conf = ®s->axi.pcie_global.conf;
1044 pcie_port->regs->axi.conf.zero_lane0 = ®s->axi.conf.zero_lane0;
1045 pcie_port->regs->axi.conf.zero_lane1 = ®s->axi.conf.zero_lane1;
1046 pcie_port->regs->axi.conf.zero_lane2 = ®s->axi.conf.zero_lane2;
1047 pcie_port->regs->axi.conf.zero_lane3 = ®s->axi.conf.zero_lane3;
1048 pcie_port->regs->axi.status.lane[0] = ®s->axi.status.lane0;
1049 pcie_port->regs->axi.status.lane[1] = ®s->axi.status.lane1;
1050 pcie_port->regs->axi.status.lane[2] = ®s->axi.status.lane2;
1051 pcie_port->regs->axi.status.lane[3] = ®s->axi.status.lane3;
1052 pcie_port->regs->axi.parity.en_axi = ®s->axi.parity.en_axi;
1053 pcie_port->regs->axi.ordering.pos_cntl = ®s->axi.ordering.pos_cntl;
1054 pcie_port->regs->axi.pre_configuration.pcie_core_setup = ®s->axi.pre_configuration.pcie_core_setup;
1055 pcie_port->regs->axi.init_fc.cfg = ®s->axi.init_fc.cfg;
1056 pcie_port->regs->axi.int_grp_a = ®s->axi.int_grp_a;
1058 pcie_port->regs->app.global_ctrl.port_init = ®s->app.global_ctrl.port_init;
1059 pcie_port->regs->app.global_ctrl.pm_control = ®s->app.global_ctrl.pm_control;
1060 pcie_port->regs->app.global_ctrl.events_gen[0] = ®s->app.global_ctrl.events_gen;
1061 pcie_port->regs->app.debug = ®s->app.debug;
1062 pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = ®s->app.soc_int.mask_inta_leg_0;
1063 pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = ®s->app.soc_int.mask_msi_leg_0;
1064 pcie_port->regs->app.ctrl_gen = ®s->app.ctrl_gen;
1065 pcie_port->regs->app.parity = ®s->app.parity;
1066 pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
1067 pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
1069 if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
1070 pcie_port->regs->app.int_grp_a = ®s->app.int_grp_a_m0;
1071 pcie_port->regs->app.int_grp_b = ®s->app.int_grp_b_m0;
1073 pcie_port->regs->app.int_grp_a = ®s->app.int_grp_a;
1074 pcie_port->regs->app.int_grp_b = ®s->app.int_grp_b;
1077 pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
1078 pcie_port->regs->core_space[0].pcie_pm_cap_base = ®s->core_space.pcie_pm_cap_base;
1079 pcie_port->regs->core_space[0].pcie_cap_base = ®s->core_space.pcie_cap_base;
1080 pcie_port->regs->core_space[0].pcie_dev_cap_base = ®s->core_space.pcie_dev_cap_base;
1081 pcie_port->regs->core_space[0].pcie_dev_ctrl_status = ®s->core_space.pcie_dev_ctrl_status;
1082 pcie_port->regs->core_space[0].pcie_link_cap_base = ®s->core_space.pcie_link_cap_base;
1083 pcie_port->regs->core_space[0].msix_cap_base = ®s->core_space.msix_cap_base;
1084 pcie_port->regs->core_space[0].aer = ®s->core_space.aer;
1085 pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = ®s->core_space.pcie_sec_ext_cap_base;
1087 pcie_port->regs->port_regs = ®s->core_space.port_regs;
1089 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_2) {
1090 struct al_pcie_rev2_regs __iomem *regs =
1091 (struct al_pcie_rev2_regs __iomem *)pcie_reg_base;
1093 pcie_port->regs->axi.ctrl.global = ®s->axi.ctrl.global;
1094 pcie_port->regs->axi.ctrl.master_arctl = ®s->axi.ctrl.master_arctl;
1095 pcie_port->regs->axi.ctrl.master_awctl = ®s->axi.ctrl.master_awctl;
1096 pcie_port->regs->axi.ctrl.slv_ctl = ®s->axi.ctrl.slv_ctl;
1097 pcie_port->regs->axi.ob_ctrl.cfg_target_bus = ®s->axi.ob_ctrl.cfg_target_bus;
1098 pcie_port->regs->axi.ob_ctrl.cfg_control = ®s->axi.ob_ctrl.cfg_control;
1099 pcie_port->regs->axi.ob_ctrl.io_start_l = ®s->axi.ob_ctrl.io_start_l;
1100 pcie_port->regs->axi.ob_ctrl.io_start_h = ®s->axi.ob_ctrl.io_start_h;
1101 pcie_port->regs->axi.ob_ctrl.io_limit_l = ®s->axi.ob_ctrl.io_limit_l;
1102 pcie_port->regs->axi.ob_ctrl.io_limit_h = ®s->axi.ob_ctrl.io_limit_h;
1103 pcie_port->regs->axi.pcie_global.conf = ®s->axi.pcie_global.conf;
1104 pcie_port->regs->axi.conf.zero_lane0 = ®s->axi.conf.zero_lane0;
1105 pcie_port->regs->axi.conf.zero_lane1 = ®s->axi.conf.zero_lane1;
1106 pcie_port->regs->axi.conf.zero_lane2 = ®s->axi.conf.zero_lane2;
1107 pcie_port->regs->axi.conf.zero_lane3 = ®s->axi.conf.zero_lane3;
1108 pcie_port->regs->axi.status.lane[0] = ®s->axi.status.lane0;
1109 pcie_port->regs->axi.status.lane[1] = ®s->axi.status.lane1;
1110 pcie_port->regs->axi.status.lane[2] = ®s->axi.status.lane2;
1111 pcie_port->regs->axi.status.lane[3] = ®s->axi.status.lane3;
1112 pcie_port->regs->axi.parity.en_axi = ®s->axi.parity.en_axi;
1113 pcie_port->regs->axi.ordering.pos_cntl = ®s->axi.ordering.pos_cntl;
1114 pcie_port->regs->axi.pre_configuration.pcie_core_setup = ®s->axi.pre_configuration.pcie_core_setup;
1115 pcie_port->regs->axi.init_fc.cfg = ®s->axi.init_fc.cfg;
1116 pcie_port->regs->axi.int_grp_a = ®s->axi.int_grp_a;
1118 pcie_port->regs->app.global_ctrl.port_init = ®s->app.global_ctrl.port_init;
1119 pcie_port->regs->app.global_ctrl.pm_control = ®s->app.global_ctrl.pm_control;
1120 pcie_port->regs->app.global_ctrl.events_gen[0] = ®s->app.global_ctrl.events_gen;
1121 pcie_port->regs->app.global_ctrl.corr_err_sts_int = ®s->app.global_ctrl.pended_corr_err_sts_int;
1122 pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = ®s->app.global_ctrl.pended_uncorr_err_sts_int;
1123 pcie_port->regs->app.debug = ®s->app.debug;
1124 pcie_port->regs->app.ap_user_send_msg = ®s->app.ap_user_send_msg;
1125 pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = ®s->app.soc_int.mask_inta_leg_0;
1126 pcie_port->regs->app.soc_int[0].mask_inta_leg_3 = ®s->app.soc_int.mask_inta_leg_3;
1127 pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = ®s->app.soc_int.mask_msi_leg_0;
1128 pcie_port->regs->app.soc_int[0].mask_msi_leg_3 = ®s->app.soc_int.mask_msi_leg_3;
1129 pcie_port->regs->app.ctrl_gen = ®s->app.ctrl_gen;
1130 pcie_port->regs->app.parity = ®s->app.parity;
1131 pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
1132 pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
1133 pcie_port->regs->app.status_per_func[0] = ®s->app.status_per_func;
1134 pcie_port->regs->app.int_grp_a = ®s->app.int_grp_a;
1135 pcie_port->regs->app.int_grp_b = ®s->app.int_grp_b;
1137 pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
1138 pcie_port->regs->core_space[0].pcie_pm_cap_base = ®s->core_space.pcie_pm_cap_base;
1139 pcie_port->regs->core_space[0].pcie_cap_base = ®s->core_space.pcie_cap_base;
1140 pcie_port->regs->core_space[0].pcie_dev_cap_base = ®s->core_space.pcie_dev_cap_base;
1141 pcie_port->regs->core_space[0].pcie_dev_ctrl_status = ®s->core_space.pcie_dev_ctrl_status;
1142 pcie_port->regs->core_space[0].pcie_link_cap_base = ®s->core_space.pcie_link_cap_base;
1143 pcie_port->regs->core_space[0].msix_cap_base = ®s->core_space.msix_cap_base;
1144 pcie_port->regs->core_space[0].aer = ®s->core_space.aer;
1145 pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = ®s->core_space.pcie_sec_ext_cap_base;
1147 pcie_port->regs->port_regs = ®s->core_space.port_regs;
1149 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1150 struct al_pcie_rev3_regs __iomem *regs =
1151 (struct al_pcie_rev3_regs __iomem *)pcie_reg_base;
1152 pcie_port->regs->axi.ctrl.global = ®s->axi.ctrl.global;
1153 pcie_port->regs->axi.ctrl.master_arctl = ®s->axi.ctrl.master_arctl;
1154 pcie_port->regs->axi.ctrl.master_awctl = ®s->axi.ctrl.master_awctl;
1155 pcie_port->regs->axi.ctrl.slv_ctl = ®s->axi.ctrl.slv_ctl;
1156 pcie_port->regs->axi.ob_ctrl.cfg_target_bus = ®s->axi.ob_ctrl.cfg_target_bus;
1157 pcie_port->regs->axi.ob_ctrl.cfg_control = ®s->axi.ob_ctrl.cfg_control;
1158 pcie_port->regs->axi.ob_ctrl.io_start_l = ®s->axi.ob_ctrl.io_start_l;
1159 pcie_port->regs->axi.ob_ctrl.io_start_h = ®s->axi.ob_ctrl.io_start_h;
1160 pcie_port->regs->axi.ob_ctrl.io_limit_l = ®s->axi.ob_ctrl.io_limit_l;
1161 pcie_port->regs->axi.ob_ctrl.io_limit_h = ®s->axi.ob_ctrl.io_limit_h;
1162 pcie_port->regs->axi.pcie_global.conf = ®s->axi.pcie_global.conf;
1163 pcie_port->regs->axi.conf.zero_lane0 = ®s->axi.conf.zero_lane0;
1164 pcie_port->regs->axi.conf.zero_lane1 = ®s->axi.conf.zero_lane1;
1165 pcie_port->regs->axi.conf.zero_lane2 = ®s->axi.conf.zero_lane2;
1166 pcie_port->regs->axi.conf.zero_lane3 = ®s->axi.conf.zero_lane3;
1167 pcie_port->regs->axi.conf.zero_lane4 = ®s->axi.conf.zero_lane4;
1168 pcie_port->regs->axi.conf.zero_lane5 = ®s->axi.conf.zero_lane5;
1169 pcie_port->regs->axi.conf.zero_lane6 = ®s->axi.conf.zero_lane6;
1170 pcie_port->regs->axi.conf.zero_lane7 = ®s->axi.conf.zero_lane7;
1171 pcie_port->regs->axi.status.lane[0] = ®s->axi.status.lane0;
1172 pcie_port->regs->axi.status.lane[1] = ®s->axi.status.lane1;
1173 pcie_port->regs->axi.status.lane[2] = ®s->axi.status.lane2;
1174 pcie_port->regs->axi.status.lane[3] = ®s->axi.status.lane3;
1175 pcie_port->regs->axi.status.lane[4] = ®s->axi.status.lane4;
1176 pcie_port->regs->axi.status.lane[5] = ®s->axi.status.lane5;
1177 pcie_port->regs->axi.status.lane[6] = ®s->axi.status.lane6;
1178 pcie_port->regs->axi.status.lane[7] = ®s->axi.status.lane7;
1179 pcie_port->regs->axi.parity.en_axi = ®s->axi.parity.en_axi;
1180 pcie_port->regs->axi.ordering.pos_cntl = ®s->axi.ordering.pos_cntl;
1181 pcie_port->regs->axi.pre_configuration.pcie_core_setup = ®s->axi.pre_configuration.pcie_core_setup;
1182 pcie_port->regs->axi.init_fc.cfg = ®s->axi.init_fc.cfg;
1183 pcie_port->regs->axi.int_grp_a = ®s->axi.int_grp_a;
1184 pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_0 = ®s->axi.axi_attr_ovrd.write_msg_ctrl_0;
1185 pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_1 = ®s->axi.axi_attr_ovrd.write_msg_ctrl_1;
1186 pcie_port->regs->axi.axi_attr_ovrd.pf_sel = ®s->axi.axi_attr_ovrd.pf_sel;
1188 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1189 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_0 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_0;
1190 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_1 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_1;
1191 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_2 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_2;
1192 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_3 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_3;
1193 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_4 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_4;
1194 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_5 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_5;
1195 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_6 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_6;
1196 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_7 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_7;
1197 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_8 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_8;
1198 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_9 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_9;
1201 pcie_port->regs->axi.msg_attr_axuser_table.entry_vec = ®s->axi.msg_attr_axuser_table.entry_vec;
1203 pcie_port->regs->app.global_ctrl.port_init = ®s->app.global_ctrl.port_init;
1204 pcie_port->regs->app.global_ctrl.pm_control = ®s->app.global_ctrl.pm_control;
1205 pcie_port->regs->app.global_ctrl.corr_err_sts_int = ®s->app.global_ctrl.pended_corr_err_sts_int;
1206 pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = ®s->app.global_ctrl.pended_uncorr_err_sts_int;
1208 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1209 pcie_port->regs->app.global_ctrl.events_gen[i] = ®s->app.events_gen_per_func[i].events_gen;
1212 pcie_port->regs->app.global_ctrl.sris_kp_counter = ®s->app.global_ctrl.sris_kp_counter_value;
1213 pcie_port->regs->app.debug = ®s->app.debug;
1215 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1216 pcie_port->regs->app.soc_int[i].mask_inta_leg_0 = ®s->app.soc_int_per_func[i].mask_inta_leg_0;
1217 pcie_port->regs->app.soc_int[i].mask_inta_leg_3 = ®s->app.soc_int_per_func[i].mask_inta_leg_3;
1218 pcie_port->regs->app.soc_int[i].mask_msi_leg_0 = ®s->app.soc_int_per_func[i].mask_msi_leg_0;
1219 pcie_port->regs->app.soc_int[i].mask_msi_leg_3 = ®s->app.soc_int_per_func[i].mask_msi_leg_3;
1222 pcie_port->regs->app.ap_user_send_msg = ®s->app.ap_user_send_msg;
1223 pcie_port->regs->app.ctrl_gen = ®s->app.ctrl_gen;
1224 pcie_port->regs->app.parity = ®s->app.parity;
1225 pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
1226 pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
1228 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++)
1229 pcie_port->regs->app.status_per_func[i] = ®s->app.status_per_func[i];
1231 pcie_port->regs->app.int_grp_a = ®s->app.int_grp_a;
1232 pcie_port->regs->app.int_grp_b = ®s->app.int_grp_b;
1233 pcie_port->regs->app.int_grp_c = ®s->app.int_grp_c;
1234 pcie_port->regs->app.int_grp_d = ®s->app.int_grp_d;
1236 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1237 pcie_port->regs->core_space[i].config_header = regs->core_space.func[i].config_header;
1238 pcie_port->regs->core_space[i].pcie_pm_cap_base = ®s->core_space.func[i].pcie_pm_cap_base;
1239 pcie_port->regs->core_space[i].pcie_cap_base = ®s->core_space.func[i].pcie_cap_base;
1240 pcie_port->regs->core_space[i].pcie_dev_cap_base = ®s->core_space.func[i].pcie_dev_cap_base;
1241 pcie_port->regs->core_space[i].pcie_dev_ctrl_status = ®s->core_space.func[i].pcie_dev_ctrl_status;
1242 pcie_port->regs->core_space[i].pcie_link_cap_base = ®s->core_space.func[i].pcie_link_cap_base;
1243 pcie_port->regs->core_space[i].msix_cap_base = ®s->core_space.func[i].msix_cap_base;
1244 pcie_port->regs->core_space[i].aer = ®s->core_space.func[i].aer;
1245 pcie_port->regs->core_space[i].tph_cap_base = ®s->core_space.func[i].tph_cap_base;
1249 /* secondary extension capability only for PF0 */
1250 pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = ®s->core_space.func[0].pcie_sec_ext_cap_base;
1252 pcie_port->regs->port_regs = ®s->core_space.func[0].port_regs;
1255 al_warn("%s: Revision ID is unknown\n",
1260 /* set maximum number of physical functions */
1261 pcie_port->max_num_of_pfs = al_pcie_port_max_num_of_pfs_get(pcie_port);
1263 al_dbg("pcie port handle initialized. port id: %d, rev_id %d, regs base %p\n",
1264 port_id, pcie_port->rev_id, pcie_reg_base);
1269 * Initializes a PCIe Physical function handle structure
1270 * Caution: this function should not read/write to any register except for
1271 * reading RO register (REV_ID for example)
1274 al_pcie_pf_handle_init(
1275 struct al_pcie_pf *pcie_pf,
1276 struct al_pcie_port *pcie_port,
1277 unsigned int pf_num)
1279 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
1280 al_assert(pf_num < pcie_port->max_num_of_pfs);
1282 if (op_mode != AL_PCIE_OPERATING_MODE_EP) {
1283 al_err("PCIe %d: can't init PF handle with operating mode [%d]\n",
1284 pcie_port->port_id, op_mode);
1288 pcie_pf->pf_num = pf_num;
1289 pcie_pf->pcie_port = pcie_port;
1291 al_dbg("PCIe %d: pf handle initialized. pf number: %d, rev_id %d, regs %p\n",
1292 pcie_port->port_id, pcie_pf->pf_num, pcie_port->rev_id,
1297 /************************** Pre PCIe Port Enable API **************************/
1299 /** configure pcie operating mode (root complex or endpoint) */
1301 al_pcie_port_operating_mode_config(
1302 struct al_pcie_port *pcie_port,
1303 enum al_pcie_operating_mode mode)
1305 struct al_pcie_regs *regs = pcie_port->regs;
1306 uint32_t reg, device_type, new_device_type;
1308 if (al_pcie_port_is_enabled(pcie_port)) {
1309 al_err("PCIe %d: already enabled, cannot set operating mode\n",
1310 pcie_port->port_id);
1314 reg = al_reg_read32(regs->axi.pcie_global.conf);
1316 device_type = AL_REG_FIELD_GET(reg,
1317 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1318 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
1319 if (mode == AL_PCIE_OPERATING_MODE_EP) {
1320 new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP;
1321 } else if (mode == AL_PCIE_OPERATING_MODE_RC) {
1322 new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC;
1324 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1325 /* config 1 PF in RC mode */
1326 al_reg_write32_masked(regs->axi.axi_attr_ovrd.pf_sel,
1327 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_AXUSER |
1328 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
1329 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_ADDR_OFFSET_MASK |
1330 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT0_OVRD |
1331 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_AXUSER |
1332 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG |
1333 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_ADDR_OFFSET_MASK |
1334 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT1_OVRD,
1335 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
1336 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG);
1339 al_err("PCIe %d: unknown operating mode: %d\n", pcie_port->port_id, mode);
1343 if (new_device_type == device_type) {
1344 al_dbg("PCIe %d: operating mode already set to %s\n",
1345 pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
1346 "EndPoint" : "Root Complex");
1349 al_info("PCIe %d: set operating mode to %s\n",
1350 pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
1351 "EndPoint" : "Root Complex");
1352 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1353 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT,
1356 al_reg_write32(regs->axi.pcie_global.conf, reg);
1362 al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
1364 struct al_pcie_regs *regs = pcie_port->regs;
1366 if (al_pcie_port_is_enabled(pcie_port)) {
1367 al_err("PCIe %d: already enabled, cannot set max lanes\n",
1368 pcie_port->port_id);
1372 /* convert to bitmask format (4 ->'b1111, 2 ->'b11, 1 -> 'b1) */
1373 uint32_t active_lanes_val = AL_PCIE_PARSE_LANES(lanes);
1375 al_reg_write32_masked(regs->axi.pcie_global.conf,
1376 (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1377 PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
1378 PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
1381 pcie_port->max_lanes = lanes;
1386 al_pcie_port_max_num_of_pfs_set(
1387 struct al_pcie_port *pcie_port,
1388 uint8_t max_num_of_pfs)
1390 if (al_pcie_port_is_enabled(pcie_port)) {
1391 al_err("PCIe %d: already enabled, cannot set max num of PFs\n",
1392 pcie_port->port_id);
1396 if (pcie_port->rev_id == AL_PCIE_REV_ID_3)
1397 al_assert(max_num_of_pfs <= REV3_MAX_NUM_OF_PFS);
1399 al_assert(max_num_of_pfs == REV1_2_MAX_NUM_OF_PFS);
1401 pcie_port->max_num_of_pfs = max_num_of_pfs;
1406 /* Inbound header credits and outstanding outbound reads configuration */
1408 al_pcie_port_ib_hcrd_os_ob_reads_config(
1409 struct al_pcie_port *pcie_port,
1410 struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config)
1412 struct al_pcie_regs *regs = pcie_port->regs;
1414 if (al_pcie_port_is_enabled(pcie_port)) {
1415 al_err("PCIe %d: already enabled, cannot configure IB credits and OB OS reads\n",
1416 pcie_port->port_id);
1420 al_assert(ib_hcrd_os_ob_reads_config->nof_np_hdr > 0);
1422 al_assert(ib_hcrd_os_ob_reads_config->nof_p_hdr > 0);
1424 al_assert(ib_hcrd_os_ob_reads_config->nof_cpl_hdr > 0);
1426 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1428 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
1429 ib_hcrd_os_ob_reads_config->nof_np_hdr +
1430 ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
1431 AL_PCIE_REV3_IB_HCRD_SUM);
1433 al_reg_write32_masked(
1434 regs->axi.init_fc.cfg,
1435 PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_MASK |
1436 PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_MASK |
1437 PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_MASK,
1438 (ib_hcrd_os_ob_reads_config->nof_p_hdr <<
1439 PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
1440 (ib_hcrd_os_ob_reads_config->nof_np_hdr <<
1441 PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
1442 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
1443 PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
1446 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
1447 ib_hcrd_os_ob_reads_config->nof_np_hdr +
1448 ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
1449 AL_PCIE_REV_1_2_IB_HCRD_SUM);
1451 al_reg_write32_masked(
1452 regs->axi.init_fc.cfg,
1453 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_MASK |
1454 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_MASK |
1455 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_MASK,
1456 (ib_hcrd_os_ob_reads_config->nof_p_hdr <<
1457 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
1458 (ib_hcrd_os_ob_reads_config->nof_np_hdr <<
1459 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
1460 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
1461 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
1464 al_reg_write32_masked(
1465 regs->axi.pre_configuration.pcie_core_setup,
1466 PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
1467 ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads <<
1468 PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);
1470 /* Store 'nof_p_hdr' and 'nof_np_hdr' to be set in the core later */
1471 pcie_port->ib_hcrd_config.nof_np_hdr =
1472 ib_hcrd_os_ob_reads_config->nof_np_hdr;
1473 pcie_port->ib_hcrd_config.nof_p_hdr =
1474 ib_hcrd_os_ob_reads_config->nof_p_hdr;
1479 enum al_pcie_operating_mode
1480 al_pcie_operating_mode_get(
1481 struct al_pcie_port *pcie_port)
1483 struct al_pcie_regs *regs = pcie_port->regs;
1484 uint32_t reg, device_type;
1486 al_assert(pcie_port);
1488 reg = al_reg_read32(regs->axi.pcie_global.conf);
1490 device_type = AL_REG_FIELD_GET(reg,
1491 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1492 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
1494 switch (device_type) {
1495 case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP:
1496 return AL_PCIE_OPERATING_MODE_EP;
1497 case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC:
1498 return AL_PCIE_OPERATING_MODE_RC;
1500 al_err("PCIe %d: unknown device type (%d) in global conf register.\n",
1501 pcie_port->port_id, device_type);
1503 return AL_PCIE_OPERATING_MODE_UNKNOWN;
1506 /**************************** PCIe Port Enable API ****************************/
1508 /** Enable PCIe port (deassert reset) */
1510 al_pcie_port_enable(struct al_pcie_port *pcie_port)
1512 struct al_pbs_regs *pbs_reg_base =
1513 (struct al_pbs_regs *)pcie_port->pbs_regs;
1514 struct al_pcie_regs *regs = pcie_port->regs;
1515 unsigned int port_id = pcie_port->port_id;
1517 /* pre-port-enable default functionality should be here */
1520 * Set inbound header credit and outstanding outbound reads defaults
1521 * Must be called before port enable (PCIE_EXIST)
1523 al_pcie_ib_hcrd_os_ob_reads_config_default(pcie_port);
1526 * Disable ATS capability
1527 * - must be done before core reset deasserted
1528 * - rev_id 0 - no effect, but no harm
1530 if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
1531 (pcie_port->rev_id == AL_PCIE_REV_ID_1) ||
1532 (pcie_port->rev_id == AL_PCIE_REV_ID_2)) {
1533 al_reg_write32_masked(
1534 regs->axi.ordering.pos_cntl,
1535 PCIE_AXI_CORE_SETUP_ATS_CAP_DIS,
1536 PCIE_AXI_CORE_SETUP_ATS_CAP_DIS);
1539 /* Deassert core reset */
1540 al_reg_write32_masked(
1541 &pbs_reg_base->unit.pcie_conf_1,
1542 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1543 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT));
1548 /** Disable PCIe port (assert reset) */
1550 al_pcie_port_disable(struct al_pcie_port *pcie_port)
1552 struct al_pbs_regs *pbs_reg_base =
1553 (struct al_pbs_regs *)pcie_port->pbs_regs;
1554 unsigned int port_id = pcie_port->port_id;
1556 if (!al_pcie_port_is_enabled(pcie_port)) {
1557 al_warn("PCIe %d: trying to disable a non-enabled port\n",
1558 pcie_port->port_id);
1561 /* Assert core reset */
1562 al_reg_write32_masked(
1563 &pbs_reg_base->unit.pcie_conf_1,
1564 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1569 al_pcie_port_memory_shutdown_set(
1570 struct al_pcie_port *pcie_port,
1573 struct al_pcie_regs *regs = pcie_port->regs;
1574 uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1575 PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN :
1576 PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;
1578 if (!al_pcie_port_is_enabled(pcie_port)) {
1579 al_err("PCIe %d: not enabled, cannot shutdown memory\n",
1580 pcie_port->port_id);
1584 al_reg_write32_masked(regs->axi.pcie_global.conf,
1585 mask, enable == AL_TRUE ? mask : 0);
1591 al_pcie_port_is_enabled(struct al_pcie_port *pcie_port)
1593 struct al_pbs_regs *pbs_reg_base = (struct al_pbs_regs *)pcie_port->pbs_regs;
1594 uint32_t pcie_exist = al_reg_read32(&pbs_reg_base->unit.pcie_conf_1);
1596 uint32_t ports_enabled = AL_REG_FIELD_GET(pcie_exist,
1597 PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_MASK,
1598 PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT);
1600 return (AL_REG_FIELD_GET(ports_enabled, AL_BIT(pcie_port->port_id),
1601 pcie_port->port_id) == 1);
1604 /*************************** PCIe Configuration API ***************************/
1606 /** configure pcie port (link params, etc..) */
1608 al_pcie_port_config(struct al_pcie_port *pcie_port,
1609 const struct al_pcie_port_config_params *params)
1611 struct al_pcie_regs *regs = pcie_port->regs;
1612 enum al_pcie_operating_mode op_mode;
1616 if (!al_pcie_port_is_enabled(pcie_port)) {
1617 al_err("PCIe %d: port not enabled, cannot configure port\n",
1618 pcie_port->port_id);
1622 if (al_pcie_is_link_started(pcie_port)) {
1623 al_err("PCIe %d: link already started, cannot configure port\n",
1624 pcie_port->port_id);
1628 al_assert(pcie_port);
1631 al_dbg("PCIe %d: port config\n", pcie_port->port_id);
1633 op_mode = al_pcie_operating_mode_get(pcie_port);
1635 /* if max lanes not specifies, read it from register */
1636 if (pcie_port->max_lanes == 0) {
1637 uint32_t global_conf = al_reg_read32(regs->axi.pcie_global.conf);
1638 uint32_t act_lanes = AL_REG_FIELD_GET(global_conf,
1639 (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1640 PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
1641 PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
1642 PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT);
1646 pcie_port->max_lanes = 1;
1649 pcie_port->max_lanes = 2;
1652 pcie_port->max_lanes = 4;
1655 pcie_port->max_lanes = 8;
1658 pcie_port->max_lanes = 0;
1659 al_err("PCIe %d: invalid max lanes val (0x%x)\n", pcie_port->port_id, act_lanes);
1664 if (params->link_params)
1665 status = al_pcie_port_link_config(pcie_port, params->link_params);
1669 /* Change max read request size to 256 bytes
1670 * Max Payload Size is remained untouched- it is the responsibility of
1671 * the host to change the MPS, if needed.
1673 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1674 al_reg_write32_masked(regs->core_space[i].pcie_dev_ctrl_status,
1675 PCIE_PORT_DEV_CTRL_STATUS_MRRS_MASK,
1676 PCIE_PORT_DEV_CTRL_STATUS_MRRS_VAL_256);
1677 if (pcie_port->rev_id != AL_PCIE_REV_ID_3)
1681 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1682 /* Set maximum physical function numbers */
1683 al_reg_write32_masked(
1684 ®s->port_regs->timer_ctrl_max_func_num,
1685 PCIE_PORT_GEN3_MAX_FUNC_NUM,
1686 pcie_port->max_num_of_pfs - 1);
1688 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1691 * in EP mode, when we have more than 1 PF we need to assert
1692 * multi-pf support so the host scan all PFs
1694 if ((op_mode == AL_PCIE_OPERATING_MODE_EP) && (pcie_port->max_num_of_pfs > 1)) {
1695 al_reg_write32_masked((uint32_t __iomem *)
1696 (®s->core_space[0].config_header[0] +
1697 (PCIE_BIST_HEADER_TYPE_BASE >> 2)),
1698 PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK,
1699 PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK);
1702 /* Disable TPH next pointer */
1703 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1704 al_reg_write32_masked(regs->core_space[i].tph_cap_base,
1705 PCIE_TPH_NEXT_POINTER, 0);
1708 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1712 status = al_pcie_port_snoop_config(pcie_port, params->enable_axi_snoop);
1716 al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
1718 al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
1720 al_pcie_port_relaxed_pcie_ordering_config(pcie_port, params->relaxed_ordering_params);
1722 if (params->lat_rply_timers)
1723 status = al_pcie_port_lat_rply_timers_config(pcie_port, params->lat_rply_timers);
1727 if (params->gen2_params)
1728 status = al_pcie_port_gen2_params_config(pcie_port, params->gen2_params);
1732 if (params->gen3_params)
1733 status = al_pcie_port_gen3_params_config(pcie_port, params->gen3_params);
1737 if (params->tl_credits)
1738 status = al_pcie_port_tl_credits_config(pcie_port, params->tl_credits);
1742 if (params->features)
1743 al_pcie_port_features_config(pcie_port, params->features);
1745 if (params->sris_params)
1746 status = al_pcie_port_sris_config(pcie_port, params->sris_params,
1747 params->link_params->max_speed);
1751 al_pcie_port_ib_hcrd_config(pcie_port);
1753 if (params->fast_link_mode) {
1754 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
1755 1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT,
1756 1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT);
1759 if (params->enable_axi_slave_err_resp)
1760 al_reg_write32_masked(®s->port_regs->axi_slave_err_resp,
1761 1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT,
1762 1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT);
1765 * Addressing RMN: 5477
1768 * address-decoder logic performs sub-target decoding even for transactions
1769 * which undergo target enforcement. thus, in case transaction's address is
1770 * inside any ECAM bar, the sub-target decoding will be set to ECAM, which
1771 * causes wrong handling by PCIe unit
1774 * on EP mode only, turning on the iATU-enable bit (with the relevant mask
1775 * below) allows the PCIe unit to discard the ECAM bit which was asserted
1776 * by-mistake in the address-decoder
1778 if (op_mode == AL_PCIE_OPERATING_MODE_EP) {
1779 al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1780 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1781 (0) << PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
1782 al_reg_write32_masked(regs->axi.ob_ctrl.cfg_control,
1783 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN,
1784 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN);
1787 if (op_mode == AL_PCIE_OPERATING_MODE_RC) {
1789 * enable memory and I/O access from port when in RC mode
1790 * in RC mode, only core_space[0] is valid.
1792 al_reg_write16_masked(
1793 (uint16_t __iomem *)(®s->core_space[0].config_header[0] + (0x4 >> 2)),
1794 0x7, /* Mem, MSE, IO */
1797 /* change the class code to match pci bridge */
1798 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1800 al_reg_write32_masked(
1801 (uint32_t __iomem *)(®s->core_space[0].config_header[0]
1802 + (PCI_CLASS_REVISION >> 2)),
1806 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1809 * Addressing RMN: 5702
1812 * target bus mask default value in HW is: 0xFE, this enforces
1813 * setting the target bus for ports 1 and 3 when running on RC
1814 * mode since bit[20] in ECAM address in these cases is set
1817 * on RC mode only, set target-bus value to 0xFF to prevent this
1820 al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1821 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1822 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK);
1825 al_dbg("PCIe %d: port config %s\n", pcie_port->port_id, status? "failed": "done");
1832 struct al_pcie_pf *pcie_pf,
1833 const struct al_pcie_pf_config_params *params)
1835 struct al_pcie_port *pcie_port;
1841 pcie_port = pcie_pf->pcie_port;
1843 if (!al_pcie_port_is_enabled(pcie_port)) {
1844 al_err("PCIe %d: port not enabled, cannot configure port\n", pcie_port->port_id);
1848 al_dbg("PCIe %d: pf %d config\n", pcie_port->port_id, pcie_pf->pf_num);
1851 status = al_pcie_port_pf_params_config(pcie_pf, params);
1856 al_dbg("PCIe %d: pf %d config %s\n",
1857 pcie_port->port_id, pcie_pf->pf_num, status ? "failed" : "done");
1862 /************************** PCIe Link Operations API **************************/
1864 /* start pcie link */
1866 al_pcie_link_start(struct al_pcie_port *pcie_port)
1868 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1870 if (!al_pcie_port_is_enabled(pcie_port)) {
1871 al_err("PCIe %d: port not enabled, cannot start link\n",
1872 pcie_port->port_id);
1876 al_dbg("PCIe_%d: start port link.\n", pcie_port->port_id);
1878 al_reg_write32_masked(
1879 regs->app.global_ctrl.port_init,
1880 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1881 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1886 /* stop pcie link */
1888 al_pcie_link_stop(struct al_pcie_port *pcie_port)
1890 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1892 if (!al_pcie_is_link_started(pcie_port)) {
1893 al_warn("PCIe %d: trying to stop a non-started link\n",
1894 pcie_port->port_id);
1897 al_dbg("PCIe_%d: stop port link.\n", pcie_port->port_id);
1899 al_reg_write32_masked(
1900 regs->app.global_ctrl.port_init,
1901 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1902 ~PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1907 /* wait for link up indication */
1909 al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
1911 int wait_count = timeout_ms * AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC;
1913 while (wait_count-- > 0) {
1914 if (al_pcie_check_link(pcie_port, NULL)) {
1915 al_info("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
1918 al_dbg("PCIe_%d: No link up, %d attempts remaining\n",
1919 pcie_port->port_id, wait_count);
1921 al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
1923 al_info("PCIE_%d: link is not established in time\n",
1924 pcie_port->port_id);
1929 /** get link status */
1931 al_pcie_link_status(struct al_pcie_port *pcie_port,
1932 struct al_pcie_link_status *status)
1934 struct al_pcie_regs *regs = pcie_port->regs;
1935 uint16_t pcie_lnksta;
1939 status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state);
1941 if (!status->link_up) {
1942 status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
1947 pcie_lnksta = al_reg_read16((uint16_t __iomem *)regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKSTA >> 1));
1949 switch(pcie_lnksta & AL_PCI_EXP_LNKSTA_CLS) {
1950 case AL_PCI_EXP_LNKSTA_CLS_2_5GB:
1951 status->speed = AL_PCIE_LINK_SPEED_GEN1;
1953 case AL_PCI_EXP_LNKSTA_CLS_5_0GB:
1954 status->speed = AL_PCIE_LINK_SPEED_GEN2;
1956 case AL_PCI_EXP_LNKSTA_CLS_8_0GB:
1957 status->speed = AL_PCIE_LINK_SPEED_GEN3;
1960 status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
1961 al_err("PCIe %d: unknown link speed indication. PCIE LINK STATUS %x\n",
1962 pcie_port->port_id, pcie_lnksta);
1964 status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
1965 al_info("PCIe %d: Link up. speed gen%d negotiated width %d\n",
1966 pcie_port->port_id, status->speed, status->lanes);
1971 /** get lane status */
1973 al_pcie_lane_status_get(
1974 struct al_pcie_port *pcie_port,
1976 struct al_pcie_lane_status *status)
1978 struct al_pcie_regs *regs = pcie_port->regs;
1979 uint32_t lane_status;
1982 al_assert(pcie_port);
1984 al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_1) || (lane < REV1_2_MAX_NUM_LANES));
1985 al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_2) || (lane < REV1_2_MAX_NUM_LANES));
1986 al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_3) || (lane < REV3_MAX_NUM_LANES));
1988 reg_ptr = regs->axi.status.lane[lane];
1990 /* Reset field is valid only when same value is read twice */
1992 lane_status = al_reg_read32(reg_ptr);
1993 status->is_reset = !!(lane_status & PCIE_AXI_STATUS_LANE_IS_RESET);
1994 } while (status->is_reset != (!!(al_reg_read32(reg_ptr) & PCIE_AXI_STATUS_LANE_IS_RESET)));
1996 status->requested_speed =
1997 (lane_status & PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_MASK) >>
1998 PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_SHIFT;
2001 /** trigger hot reset */
2003 al_pcie_link_hot_reset(struct al_pcie_port *pcie_port, al_bool enable)
2005 struct al_pcie_regs *regs = pcie_port->regs;
2006 uint32_t events_gen;
2007 al_bool app_reset_state;
2008 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2010 if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2011 al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
2015 if (!al_pcie_is_link_started(pcie_port)) {
2016 al_err("PCIe %d: link not started, cannot trigger hot-reset\n", pcie_port->port_id);
2020 events_gen = al_reg_read32(regs->app.global_ctrl.events_gen[0]);
2021 app_reset_state = events_gen & PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT;
2023 if (enable && app_reset_state) {
2024 al_err("PCIe %d: link is already in hot-reset state\n", pcie_port->port_id);
2026 } else if ((!enable) && (!(app_reset_state))) {
2027 al_err("PCIe %d: link is already in non-hot-reset state\n", pcie_port->port_id);
2030 al_dbg("PCIe %d: %s hot-reset\n", pcie_port->port_id,
2031 (enable ? "enabling" : "disabling"));
2032 /* hot-reset functionality is implemented only for function 0 */
2033 al_reg_write32_masked(regs->app.global_ctrl.events_gen[0],
2034 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT,
2035 (enable ? PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT
2036 : ~PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT));
2041 /** disable port link */
2043 al_pcie_link_disable(struct al_pcie_port *pcie_port, al_bool disable)
2045 struct al_pcie_regs *regs = pcie_port->regs;
2046 uint32_t pcie_lnkctl;
2047 al_bool link_disable_state;
2048 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2050 if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2051 al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
2055 if (!al_pcie_is_link_started(pcie_port)) {
2056 al_err("PCIe %d: link not started, cannot disable link\n", pcie_port->port_id);
2060 pcie_lnkctl = al_reg_read32(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1));
2061 link_disable_state = pcie_lnkctl & AL_PCI_EXP_LNKCTL_LNK_DIS;
2063 if (disable && link_disable_state) {
2064 al_err("PCIe %d: link is already in disable state\n", pcie_port->port_id);
2066 } else if ((!disable) && (!(link_disable_state))) {
2067 al_err("PCIe %d: link is already in enable state\n", pcie_port->port_id);
2071 al_dbg("PCIe %d: %s port\n", pcie_port->port_id, (disable ? "disabling" : "enabling"));
2072 al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2073 AL_PCI_EXP_LNKCTL_LNK_DIS,
2074 (disable ? AL_PCI_EXP_LNKCTL_LNK_DIS : ~AL_PCI_EXP_LNKCTL_LNK_DIS));
2080 al_pcie_link_retrain(struct al_pcie_port *pcie_port)
2082 struct al_pcie_regs *regs = pcie_port->regs;
2083 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2085 if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2086 al_err("PCIe %d: link-retrain is applicable only for RC mode\n",
2087 pcie_port->port_id);
2091 if (!al_pcie_is_link_started(pcie_port)) {
2092 al_err("PCIe %d: link not started, cannot link-retrain\n", pcie_port->port_id);
2096 al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2097 AL_PCI_EXP_LNKCTL_LNK_RTRN, AL_PCI_EXP_LNKCTL_LNK_RTRN);
2102 /* trigger speed change */
2104 al_pcie_link_change_speed(struct al_pcie_port *pcie_port,
2105 enum al_pcie_link_speed new_speed)
2107 struct al_pcie_regs *regs = pcie_port->regs;
2109 if (!al_pcie_is_link_started(pcie_port)) {
2110 al_err("PCIe %d: link not started, cannot change speed\n", pcie_port->port_id);
2114 al_dbg("PCIe %d: changing speed to %d\n", pcie_port->port_id, new_speed);
2116 al_pcie_port_link_speed_ctrl_set(pcie_port, new_speed);
2118 al_reg_write32_masked(®s->port_regs->gen2_ctrl,
2119 PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE,
2120 PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE);
2125 /* TODO: check if this function needed */
2127 al_pcie_link_change_width(struct al_pcie_port *pcie_port,
2128 uint8_t width __attribute__((__unused__)))
2130 al_err("PCIe %d: link change width not implemented\n",
2131 pcie_port->port_id);
2136 /**************************** Post Link Start API *****************************/
2138 /************************** Snoop Configuration API ***************************/
2141 al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_snoop)
2143 struct al_pcie_regs *regs = pcie_port->regs;
2145 /* Set snoop mode */
2146 al_info("PCIE_%d: snoop mode %s\n",
2147 pcie_port->port_id, enable_axi_snoop ? "enable" : "disable");
2149 if (enable_axi_snoop) {
2150 al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2151 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2152 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP);
2154 al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2155 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2156 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP);
2158 al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2159 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2160 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP);
2162 al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2163 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2164 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP);
2169 /************************** Configuration Space API ***************************/
2171 /** get base address of pci configuration space header */
2173 al_pcie_config_space_get(struct al_pcie_pf *pcie_pf,
2174 uint8_t __iomem **addr)
2176 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2178 *addr = (uint8_t __iomem *)®s->core_space[pcie_pf->pf_num].config_header[0];
2182 /* Read data from the local configuration space */
2184 al_pcie_local_cfg_space_read(
2185 struct al_pcie_pf *pcie_pf,
2186 unsigned int reg_offset)
2188 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2191 data = al_reg_read32(®s->core_space[pcie_pf->pf_num].config_header[reg_offset]);
2196 /* Write data to the local configuration space */
2198 al_pcie_local_cfg_space_write(
2199 struct al_pcie_pf *pcie_pf,
2200 unsigned int reg_offset,
2203 al_bool allow_ro_wr)
2205 struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
2206 struct al_pcie_regs *regs = pcie_port->regs;
2207 unsigned int pf_num = pcie_pf->pf_num;
2208 uint32_t *offset = ®s->core_space[pf_num].config_header[reg_offset];
2211 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
2213 if (cs2 == AL_FALSE)
2214 al_reg_write32(offset, data);
2216 al_reg_write32_dbi_cs2(pcie_port, offset, data);
2219 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
2222 /** set target_bus and mask_target_bus */
2224 al_pcie_target_bus_set(
2225 struct al_pcie_port *pcie_port,
2227 uint8_t mask_target_bus)
2229 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2232 reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2233 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2234 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT,
2236 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2237 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT,
2239 al_reg_write32(regs->axi.ob_ctrl.cfg_target_bus, reg);
2243 /** get target_bus and mask_target_bus */
2245 al_pcie_target_bus_get(
2246 struct al_pcie_port *pcie_port,
2247 uint8_t *target_bus,
2248 uint8_t *mask_target_bus)
2250 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2253 al_assert(target_bus);
2254 al_assert(mask_target_bus);
2256 reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2258 *mask_target_bus = AL_REG_FIELD_GET(reg,
2259 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2260 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
2261 *target_bus = AL_REG_FIELD_GET(reg,
2262 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2263 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT);
2267 /** Set secondary bus number */
2269 al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus)
2271 struct al_pcie_regs *regs = pcie_port->regs;
2273 uint32_t secbus_val = (secbus <<
2274 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT);
2276 al_reg_write32_masked(
2277 regs->axi.ob_ctrl.cfg_control,
2278 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK,
2283 /** Set sub-ordinary bus number */
2285 al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port, uint8_t subbus)
2287 struct al_pcie_regs *regs = pcie_port->regs;
2289 uint32_t subbus_val = (subbus <<
2290 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT);
2292 al_reg_write32_masked(
2293 regs->axi.ob_ctrl.cfg_control,
2294 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK,
2299 /* Enable/disable deferring incoming configuration requests */
2301 al_pcie_app_req_retry_set(
2302 struct al_pcie_port *pcie_port,
2305 struct al_pcie_regs *regs = pcie_port->regs;
2306 uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
2307 PCIE_W_REV3_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
2308 PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
2310 al_reg_write32_masked(regs->app.global_ctrl.pm_control,
2311 mask, (en == AL_TRUE) ? mask : 0);
2314 /*************** Internal Address Translation Unit (ATU) API ******************/
2316 /** program internal ATU region entry */
2318 al_pcie_atu_region_set(
2319 struct al_pcie_port *pcie_port,
2320 struct al_pcie_atu_region *atu_region)
2322 struct al_pcie_regs *regs = pcie_port->regs;
2323 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2327 * Addressing RMN: 5384
2330 * From SNPS (also included in the data book) Dynamic iATU Programming
2331 * With AHB/AXI Bridge Module When the bridge slave interface clock
2332 * (hresetn or slv_aclk) is asynchronous to the PCIe native core clock
2333 * (core_clk), you must not update the iATU registers while operations
2334 * are in progress on the AHB/AXI bridge slave interface. The iATU
2335 * registers are in the core_clk clock domain. The register outputs are
2336 * used in the AHB/AXI bridge slave interface clock domain. There is no
2337 * synchronization logic between these registers and the AHB/AXI bridge
2341 * Do not allow configuring Outbound iATU after link is started
2343 if ((atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)
2344 && (al_pcie_is_link_started(pcie_port))) {
2345 if (!atu_region->enforce_ob_atu_region_set) {
2346 al_err("PCIe %d: setting OB iATU after link is started is not allowed\n",
2347 pcie_port->port_id);
2350 al_info("PCIe %d: setting OB iATU even after link is started\n",
2351 pcie_port->port_id);
2355 /*TODO : add sanity check */
2356 AL_REG_FIELD_SET(reg, 0xF, 0, atu_region->index);
2357 AL_REG_BIT_VAL_SET(reg, 31, atu_region->direction);
2358 al_reg_write32(®s->port_regs->iatu.index, reg);
2360 al_reg_write32(®s->port_regs->iatu.lower_base_addr,
2361 (uint32_t)(atu_region->base_addr & 0xFFFFFFFF));
2362 al_reg_write32(®s->port_regs->iatu.upper_base_addr,
2363 (uint32_t)((atu_region->base_addr >> 32)& 0xFFFFFFFF));
2364 al_reg_write32(®s->port_regs->iatu.lower_target_addr,
2365 (uint32_t)(atu_region->target_addr & 0xFFFFFFFF));
2366 al_reg_write32(®s->port_regs->iatu.upper_target_addr,
2367 (uint32_t)((atu_region->target_addr >> 32)& 0xFFFFFFFF));
2369 /* configure the limit, not needed when working in BAR match mode */
2370 if (atu_region->match_mode == 0) {
2371 uint32_t limit_reg_val;
2372 if (pcie_port->rev_id > AL_PCIE_REV_ID_0) {
2373 uint32_t *limit_ext_reg =
2374 (atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
2375 ®s->app.atu.out_mask_pair[atu_region->index / 2] :
2376 ®s->app.atu.in_mask_pair[atu_region->index / 2];
2377 uint32_t limit_ext_reg_mask =
2378 (atu_region->index % 2) ?
2379 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
2380 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
2381 unsigned int limit_ext_reg_shift =
2382 (atu_region->index % 2) ?
2383 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
2384 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
2385 uint64_t limit_sz_msk =
2386 atu_region->limit - atu_region->base_addr;
2387 uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
2390 if (limit_ext_reg_val) {
2391 limit_reg_val = (uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
2392 al_assert(limit_reg_val == 0xFFFFFFFF);
2394 limit_reg_val = (uint32_t)(atu_region->limit &
2398 al_reg_write32_masked(
2401 limit_ext_reg_val << limit_ext_reg_shift);
2403 limit_reg_val = (uint32_t)(atu_region->limit & 0xFFFFFFFF);
2406 al_reg_write32(®s->port_regs->iatu.limit_addr,
2411 AL_REG_FIELD_SET(reg, 0x1F, 0, atu_region->tlp_type);
2412 AL_REG_FIELD_SET(reg, 0x3 << 9, 9, atu_region->attr);
2415 if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
2416 && (op_mode == AL_PCIE_OPERATING_MODE_EP)
2417 && (atu_region->function_match_bypass_mode)) {
2418 AL_REG_FIELD_SET(reg,
2419 PCIE_IATU_CR1_FUNC_NUM_MASK,
2420 PCIE_IATU_CR1_FUNC_NUM_SHIFT,
2421 atu_region->function_match_bypass_mode_number);
2424 al_reg_write32(®s->port_regs->iatu.cr1, reg);
2426 /* Enable/disable the region. */
2428 AL_REG_FIELD_SET(reg, 0xFF, 0, atu_region->msg_code);
2429 AL_REG_FIELD_SET(reg, 0x700, 8, atu_region->bar_number);
2430 AL_REG_FIELD_SET(reg, 0x3 << 24, 24, atu_region->response);
2431 AL_REG_BIT_VAL_SET(reg, 16, atu_region->enable_attr_match_mode == AL_TRUE);
2432 AL_REG_BIT_VAL_SET(reg, 21, atu_region->enable_msg_match_mode == AL_TRUE);
2433 AL_REG_BIT_VAL_SET(reg, 28, atu_region->cfg_shift_mode == AL_TRUE);
2434 AL_REG_BIT_VAL_SET(reg, 29, atu_region->invert_matching == AL_TRUE);
2435 if (atu_region->tlp_type == AL_PCIE_TLP_TYPE_MEM || atu_region->tlp_type == AL_PCIE_TLP_TYPE_IO)
2436 AL_REG_BIT_VAL_SET(reg, 30, !!atu_region->match_mode);
2437 AL_REG_BIT_VAL_SET(reg, 31, !!atu_region->enable);
2439 /* In outbound, enable function bypass
2440 * In inbound, enable function match mode
2441 * Note: this is the same bit, has different meanings in ob/ib ATUs
2443 if (op_mode == AL_PCIE_OPERATING_MODE_EP)
2444 AL_REG_FIELD_SET(reg,
2445 PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_MASK,
2446 PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_SHIFT,
2447 atu_region->function_match_bypass_mode ? 0x1 : 0x0);
2449 al_reg_write32(®s->port_regs->iatu.cr2, reg);
2454 /** obtains internal ATU region base/target addresses */
2456 al_pcie_atu_region_get_fields(
2457 struct al_pcie_port *pcie_port,
2458 enum al_pcie_atu_dir direction, uint8_t index,
2459 al_bool *enable, uint64_t *base_addr, uint64_t *target_addr)
2461 struct al_pcie_regs *regs = pcie_port->regs;
2465 AL_REG_FIELD_SET(reg, 0xF, 0, index);
2466 AL_REG_BIT_VAL_SET(reg, 31, direction);
2467 al_reg_write32(®s->port_regs->iatu.index, reg);
2469 *base_addr = al_reg_read32(®s->port_regs->iatu.lower_base_addr);
2470 high_addr = al_reg_read32(®s->port_regs->iatu.upper_base_addr);
2472 *base_addr |= high_addr;
2474 *target_addr = al_reg_read32(®s->port_regs->iatu.lower_target_addr);
2475 high_addr = al_reg_read32(®s->port_regs->iatu.upper_target_addr);
2477 *target_addr |= high_addr;
2479 reg = al_reg_read32(®s->port_regs->iatu.cr1);
2480 *enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;
2484 al_pcie_axi_io_config(
2485 struct al_pcie_port *pcie_port,
2486 al_phys_addr_t start,
2489 struct al_pcie_regs *regs = pcie_port->regs;
2491 al_reg_write32(regs->axi.ob_ctrl.io_start_h,
2492 (uint32_t)((start >> 32) & 0xFFFFFFFF));
2494 al_reg_write32(regs->axi.ob_ctrl.io_start_l,
2495 (uint32_t)(start & 0xFFFFFFFF));
2497 al_reg_write32(regs->axi.ob_ctrl.io_limit_h,
2498 (uint32_t)((end >> 32) & 0xFFFFFFFF));
2500 al_reg_write32(regs->axi.ob_ctrl.io_limit_l,
2501 (uint32_t)(end & 0xFFFFFFFF));
2503 al_reg_write32_masked(regs->axi.ctrl.slv_ctl,
2504 PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN,
2505 PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN);
2508 /************** Interrupt generation (Endpoint mode Only) API *****************/
2510 /** generate INTx Assert/DeAssert Message */
2512 al_pcie_legacy_int_gen(
2513 struct al_pcie_pf *pcie_pf,
2515 enum al_pcie_legacy_int_type type)
2517 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2518 unsigned int pf_num = pcie_pf->pf_num;
2521 al_assert(type == AL_PCIE_LEGACY_INTA); /* only INTA supported */
2522 reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2523 AL_REG_BIT_VAL_SET(reg, 3, !!assert);
2524 al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2529 /** generate MSI interrupt */
2531 al_pcie_msi_int_gen(struct al_pcie_pf *pcie_pf, uint8_t vector)
2533 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2534 unsigned int pf_num = pcie_pf->pf_num;
2537 /* set msi vector and clear MSI request */
2538 reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2539 AL_REG_BIT_CLEAR(reg, 4);
2540 AL_REG_FIELD_SET(reg,
2541 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK,
2542 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT,
2544 al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2545 /* set MSI request */
2546 AL_REG_BIT_SET(reg, 4);
2547 al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2552 /** configure MSIX capability */
2554 al_pcie_msix_config(
2555 struct al_pcie_pf *pcie_pf,
2556 struct al_pcie_msix_params *msix_params)
2558 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2559 unsigned int pf_num = pcie_pf->pf_num;
2562 al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_TRUE);
2564 msix_reg0 = al_reg_read32(regs->core_space[pf_num].msix_cap_base);
2566 msix_reg0 &= ~(AL_PCI_MSIX_MSGCTRL_TBL_SIZE << AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT);
2567 msix_reg0 |= ((msix_params->table_size - 1) & AL_PCI_MSIX_MSGCTRL_TBL_SIZE) <<
2568 AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT;
2569 al_reg_write32(regs->core_space[pf_num].msix_cap_base, msix_reg0);
2571 /* Table offset & BAR */
2572 al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_TABLE >> 2),
2573 (msix_params->table_offset & AL_PCI_MSIX_TABLE_OFFSET) |
2574 (msix_params->table_bar & AL_PCI_MSIX_TABLE_BAR));
2575 /* PBA offset & BAR */
2576 al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_PBA >> 2),
2577 (msix_params->pba_offset & AL_PCI_MSIX_PBA_OFFSET) |
2578 (msix_params->pba_bar & AL_PCI_MSIX_PBA_BAR));
2580 al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_FALSE);
2585 /** check whether MSIX is enabled */
2587 al_pcie_msix_enabled(struct al_pcie_pf *pcie_pf)
2589 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2590 uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2592 if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_EN)
2597 /** check whether MSIX is masked */
2599 al_pcie_msix_masked(struct al_pcie_pf *pcie_pf)
2601 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2602 uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2604 if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_MASK)
2609 /******************** Advanced Error Reporting (AER) API **********************/
2611 /** configure AER capability */
2614 struct al_pcie_pf *pcie_pf,
2615 struct al_pcie_aer_params *params)
2617 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2618 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2621 reg_val = al_reg_read32(&aer_regs->header);
2623 if (((reg_val & PCIE_AER_CAP_ID_MASK) >> PCIE_AER_CAP_ID_SHIFT) !=
2624 PCIE_AER_CAP_ID_VAL)
2627 if (((reg_val & PCIE_AER_CAP_VER_MASK) >> PCIE_AER_CAP_VER_SHIFT) !=
2628 PCIE_AER_CAP_VER_VAL)
2631 al_reg_write32(&aer_regs->corr_err_mask, ~params->enabled_corr_err);
2633 al_reg_write32(&aer_regs->uncorr_err_mask,
2634 (~params->enabled_uncorr_non_fatal_err) |
2635 (~params->enabled_uncorr_fatal_err));
2637 al_reg_write32(&aer_regs->uncorr_err_severity,
2638 params->enabled_uncorr_fatal_err);
2640 al_reg_write32(&aer_regs->cap_and_ctrl,
2641 (params->ecrc_gen_en ? PCIE_AER_CTRL_STAT_ECRC_GEN_EN : 0) |
2642 (params->ecrc_chk_en ? PCIE_AER_CTRL_STAT_ECRC_CHK_EN : 0));
2644 al_reg_write32_masked(
2645 regs->core_space[pcie_pf->pf_num].pcie_dev_ctrl_status,
2646 PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN |
2647 PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN |
2648 PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN |
2649 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN,
2650 (params->enabled_corr_err ?
2651 PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN : 0) |
2652 (params->enabled_uncorr_non_fatal_err ?
2653 PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN : 0) |
2654 (params->enabled_uncorr_fatal_err ?
2655 PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN : 0) |
2656 ((params->enabled_uncorr_non_fatal_err &
2657 AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2658 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0) |
2659 ((params->enabled_uncorr_fatal_err &
2660 AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2661 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0));
2666 /** AER uncorretable errors get and clear */
2668 al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf)
2670 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2671 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2674 reg_val = al_reg_read32(&aer_regs->uncorr_err_stat);
2675 al_reg_write32(&aer_regs->uncorr_err_stat, reg_val);
2680 /** AER corretable errors get and clear */
2682 al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
2684 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2685 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2688 reg_val = al_reg_read32(&aer_regs->corr_err_stat);
2689 al_reg_write32(&aer_regs->corr_err_stat, reg_val);
2694 #if (AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS != 4)
2695 #error Wrong assumption!
2698 /** AER get the header for the TLP corresponding to a detected error */
2700 al_pcie_aer_err_tlp_hdr_get(
2701 struct al_pcie_pf *pcie_pf,
2702 uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2704 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2705 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2708 for (i = 0; i < AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS; i++)
2709 hdr[i] = al_reg_read32(&aer_regs->header_log[i]);
2712 /********************** Loopback mode (RC and Endpoint modes) ************/
2714 /** enter local pipe loopback mode */
2716 al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port)
2718 struct al_pcie_regs *regs = pcie_port->regs;
2720 al_dbg("PCIe %d: Enter LOCAL PIPE Loopback mode", pcie_port->port_id);
2722 al_reg_write32_masked(®s->port_regs->pipe_loopback_ctrl,
2723 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2724 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
2726 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
2727 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
2728 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT);
2734 * @brief exit local pipe loopback mode
2736 * @param pcie_port pcie port handle
2737 * @return 0 if no error found
2740 al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port)
2742 struct al_pcie_regs *regs = pcie_port->regs;
2744 al_dbg("PCIe %d: Exit LOCAL PIPE Loopback mode", pcie_port->port_id);
2746 al_reg_write32_masked(®s->port_regs->pipe_loopback_ctrl,
2747 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2750 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
2751 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
2756 /** enter remote loopback mode */
2758 al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port)
2760 struct al_pcie_regs *regs = pcie_port->regs;
2762 al_dbg("PCIe %d: Enter REMOTE Loopback mode", pcie_port->port_id);
2764 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
2765 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2766 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
2772 * @brief exit remote loopback mode
2774 * @param pcie_port pcie port handle
2775 * @return 0 if no error found
2778 al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port)
2780 struct al_pcie_regs *regs = pcie_port->regs;
2782 al_dbg("PCIe %d: Exit REMOTE Loopback mode", pcie_port->port_id);
2784 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
2785 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,