2 ********************************************************************************
3 Copyright (C) 2015 Annapurna Labs Ltd.
5 This file may be licensed under the terms of the Annapurna Labs Commercial
8 Alternatively, this file can be distributed under the terms of the GNU General
9 Public License V2 as published by the Free Software Foundation and can be
10 found at http://www.gnu.org/licenses/gpl-2.0.html
12 Alternatively, redistribution and use in source and binary forms, with or
13 without modification, are permitted provided that the following conditions are
16 * Redistributions of source code must retain the above copyright notice,
17 this list of conditions and the following disclaimer.
19 * Redistributions in binary form must reproduce the above copyright
20 notice, this list of conditions and the following disclaimer in
21 the documentation and/or other materials provided with the
24 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
25 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
28 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
31 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *******************************************************************************/
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
44 #include "al_hal_pcie.h"
45 #include "al_hal_pbs_regs.h"
46 #include "al_hal_unit_adapter_regs.h"
49 * Parameter definitions
51 #define AL_PCIE_AXI_REGS_OFFSET 0x0
53 #define AL_PCIE_LTSSM_STATE_L0 0x11
54 #define AL_PCIE_LTSSM_STATE_L0S 0x12
55 #define AL_PCIE_DEVCTL_PAYLOAD_128B 0x00
56 #define AL_PCIE_DEVCTL_PAYLOAD_256B 0x20
58 #define AL_PCIE_SECBUS_DEFAULT 0x1
59 #define AL_PCIE_SUBBUS_DEFAULT 0x1
60 #define AL_PCIE_LINKUP_WAIT_INTERVAL 50 /* measured in usec */
61 #define AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC 20
63 #define AL_PCIE_LINKUP_RETRIES 8
65 #define AL_PCIE_MAX_32_MEMORY_BAR_SIZE (0x100000000ULL)
66 #define AL_PCIE_MIN_MEMORY_BAR_SIZE (1 << 12)
67 #define AL_PCIE_MIN_IO_BAR_SIZE (1 << 8)
70 * inbound header credits and outstanding outbound reads defaults
72 /** RC - Revisions 1/2 */
73 #define AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT (8)
74 #define AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT (41)
75 #define AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT (25)
76 #define AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT (31)
77 /** EP - Revisions 1/2 */
78 #define AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT (15)
79 #define AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT (76)
80 #define AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT (6)
81 #define AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT (15)
82 /** RC - Revision 3 */
83 #define AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT (32)
84 #define AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT (161)
85 #define AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT (38)
86 #define AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT (60)
87 /** EP - Revision 3 */
88 #define AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT (32)
89 #define AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT (161)
90 #define AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT (38)
91 #define AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT (60)
96 #define AL_PCIE_PARSE_LANES(v) (((1 << v) - 1) << \
97 PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT)
99 #define AL_PCIE_FLR_DONE_INTERVAL 10
105 al_pcie_port_wr_to_ro_set(struct al_pcie_port *pcie_port, al_bool enable)
107 /* when disabling writes to RO, make sure any previous writes to
108 * config space were committed
110 if (enable == AL_FALSE)
111 al_local_data_memory_barrier();
113 al_reg_write32(&pcie_port->regs->port_regs->rd_only_wr_en,
114 (enable == AL_TRUE) ? 1 : 0);
116 /* when enabling writes to RO, make sure it is committed before trying
117 * to write to RO config space
119 if (enable == AL_TRUE)
120 al_local_data_memory_barrier();
123 /** helper function to access dbi_cs2 registers */
125 al_reg_write32_dbi_cs2(
126 struct al_pcie_port *pcie_port,
131 (pcie_port->rev_id == AL_PCIE_REV_ID_3) ? 0x4000 : 0x1000;
133 al_reg_write32((uint32_t *)((uintptr_t)offset | cs2_bit), val);
137 al_pcie_speed_gen_code(enum al_pcie_link_speed speed)
139 if (speed == AL_PCIE_LINK_SPEED_GEN1)
141 if (speed == AL_PCIE_LINK_SPEED_GEN2)
143 if (speed == AL_PCIE_LINK_SPEED_GEN3)
145 /* must not be reached */
150 al_pcie_port_link_speed_ctrl_set(
151 struct al_pcie_port *pcie_port,
152 enum al_pcie_link_speed max_speed)
154 struct al_pcie_regs *regs = pcie_port->regs;
156 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
158 if (max_speed != AL_PCIE_LINK_SPEED_DEFAULT) {
159 uint16_t max_speed_val = (uint16_t)al_pcie_speed_gen_code(max_speed);
160 al_reg_write32_masked(
161 (uint32_t __iomem *)(regs->core_space[0].pcie_link_cap_base),
163 al_reg_write32_masked(
164 (uint32_t __iomem *)(regs->core_space[0].pcie_cap_base
165 + (AL_PCI_EXP_LNKCTL2 >> 2)),
169 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
173 al_pcie_port_link_config(
174 struct al_pcie_port *pcie_port,
175 const struct al_pcie_link_params *link_params)
177 struct al_pcie_regs *regs = pcie_port->regs;
178 uint8_t max_lanes = pcie_port->max_lanes;
180 if ((link_params->max_payload_size != AL_PCIE_MPS_DEFAULT) &&
181 (link_params->max_payload_size != AL_PCIE_MPS_128) &&
182 (link_params->max_payload_size != AL_PCIE_MPS_256)) {
183 al_err("PCIe %d: unsupported Max Payload Size (%u)\n",
184 pcie_port->port_id, link_params->max_payload_size);
188 al_pcie_port_link_speed_ctrl_set(pcie_port, link_params->max_speed);
190 /* Change Max Payload Size, if needed.
191 * The Max Payload Size is only valid for PF0.
193 if (link_params->max_payload_size != AL_PCIE_MPS_DEFAULT)
194 al_reg_write32_masked(regs->core_space[0].pcie_dev_ctrl_status,
195 PCIE_PORT_DEV_CTRL_STATUS_MPS_MASK,
196 link_params->max_payload_size <<
197 PCIE_PORT_DEV_CTRL_STATUS_MPS_SHIFT);
199 /** Snap from PCIe core spec:
200 * Link Mode Enable. Sets the number of lanes in the link that you want
201 * to connect to the link partner. When you have unused lanes in your
202 * system, then you must change the value in this register to reflect
203 * the number of lanes. You must also change the value in the
204 * "Predetermined Number of Lanes" field of the "Link Width and Speed
205 * Change Control Register".
211 * 111111: x32 (not supported)
213 al_reg_write32_masked(®s->port_regs->gen2_ctrl,
214 PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_MASK,
215 max_lanes << PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_SHIFT);
216 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
217 PCIE_PORT_LINK_CTRL_LINK_CAPABLE_MASK,
218 (max_lanes + (max_lanes-1))
219 << PCIE_PORT_LINK_CTRL_LINK_CAPABLE_SHIFT);
225 al_pcie_port_ram_parity_int_config(
226 struct al_pcie_port *pcie_port,
229 struct al_pcie_regs *regs = pcie_port->regs;
231 al_reg_write32(®s->app.parity->en_core,
232 (enable == AL_TRUE) ? 0xffffffff : 0x0);
234 al_reg_write32_masked(®s->app.int_grp_b->mask,
235 PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE,
236 (enable != AL_TRUE) ?
237 PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE : 0);
242 al_pcie_port_axi_parity_int_config(
243 struct al_pcie_port *pcie_port,
246 struct al_pcie_regs *regs = pcie_port->regs;
247 uint32_t parity_enable_mask = 0xffffffff;
250 * Addressing RMN: 5603
253 * u4_ram2p signal false parity error
256 * Disable parity check for this memory
258 if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
259 parity_enable_mask &= ~PCIE_AXI_PARITY_EN_AXI_U4_RAM2P;
261 al_reg_write32(regs->axi.parity.en_axi,
262 (enable == AL_TRUE) ? parity_enable_mask : 0x0);
264 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
265 al_reg_write32_masked(regs->axi.ctrl.global,
266 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
267 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
268 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
269 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
270 (enable == AL_TRUE) ?
271 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
272 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
273 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
274 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
275 PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
277 al_reg_write32_masked(regs->axi.ctrl.global,
278 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
279 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
280 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
281 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
282 (enable == AL_TRUE) ?
283 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
284 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
285 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
286 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
287 PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
290 al_reg_write32_masked(®s->axi.int_grp_a->mask,
291 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
292 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
293 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
294 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
295 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI,
296 (enable != AL_TRUE) ?
297 (PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
298 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
299 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
300 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
301 PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI) : 0);
305 al_pcie_port_relaxed_pcie_ordering_config(
306 struct al_pcie_port *pcie_port,
307 struct al_pcie_relaxed_ordering_params *relaxed_ordering_params)
309 struct al_pcie_regs *regs = pcie_port->regs;
310 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
313 * - RC: Rx relaxed ordering only
314 * - EP: TX relaxed ordering only
316 al_bool tx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_FALSE : AL_TRUE);
317 al_bool rx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_TRUE : AL_FALSE);
319 if (relaxed_ordering_params) {
320 tx_relaxed_ordering = relaxed_ordering_params->enable_tx_relaxed_ordering;
321 rx_relaxed_ordering = relaxed_ordering_params->enable_rx_relaxed_ordering;
325 * - disable outbound completion must be stalled behind outbound write
326 * ordering rule enforcement is disabled for root-port
327 * - disables read completion on the master port push slave writes for end-point
329 al_reg_write32_masked(
330 regs->axi.ordering.pos_cntl,
331 PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
332 PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
333 PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS |
334 PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES,
335 (tx_relaxed_ordering ?
336 (PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
337 PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES) : 0) |
338 (rx_relaxed_ordering ?
339 (PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
340 PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS) : 0));
345 void __iomem *pbs_reg_base,
346 void __iomem *pcie_reg_base)
349 uint16_t chip_id_dev;
351 struct al_pbs_regs *pbs_regs = pbs_reg_base;
353 /* get revision ID from PBS' chip_id register */
354 chip_id = al_reg_read32(&pbs_regs->unit.chip_id);
355 chip_id_dev = AL_REG_FIELD_GET(chip_id,
356 PBS_UNIT_CHIP_ID_DEV_ID_MASK,
357 PBS_UNIT_CHIP_ID_DEV_ID_SHIFT);
359 if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V1) {
360 rev_id = AL_PCIE_REV_ID_1;
361 } else if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V2) {
362 struct al_pcie_revx_regs __iomem *regs =
363 (struct al_pcie_revx_regs __iomem *)pcie_reg_base;
366 dev_id = al_reg_read32(®s->axi.device_id.device_rev_id) &
367 PCIE_AXI_DEVICE_ID_REG_DEV_ID_MASK;
368 if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X4) {
369 rev_id = AL_PCIE_REV_ID_2;
370 } else if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X8) {
371 rev_id = AL_PCIE_REV_ID_3;
373 al_warn("%s: Revision ID is unknown\n",
378 al_warn("%s: Revision ID is unknown\n",
386 al_pcie_port_lat_rply_timers_config(
387 struct al_pcie_port *pcie_port,
388 const struct al_pcie_latency_replay_timers *lat_rply_timers)
390 struct al_pcie_regs *regs = pcie_port->regs;
393 AL_REG_FIELD_SET(reg, 0xFFFF, 0, lat_rply_timers->round_trip_lat_limit);
394 AL_REG_FIELD_SET(reg, 0xFFFF0000, 16, lat_rply_timers->replay_timer_limit);
396 al_reg_write32(®s->port_regs->ack_lat_rply_timer, reg);
401 al_pcie_ib_hcrd_os_ob_reads_config_default(
402 struct al_pcie_port *pcie_port)
405 struct al_pcie_ib_hcrd_os_ob_reads_config ib_hcrd_os_ob_reads_config;
407 switch (al_pcie_operating_mode_get(pcie_port)) {
408 case AL_PCIE_OPERATING_MODE_RC:
409 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
410 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
411 AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT;
412 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
413 AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT;
414 ib_hcrd_os_ob_reads_config.nof_np_hdr =
415 AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT;
416 ib_hcrd_os_ob_reads_config.nof_p_hdr =
417 AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT;
419 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
420 AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT;
421 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
422 AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT;
423 ib_hcrd_os_ob_reads_config.nof_np_hdr =
424 AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT;
425 ib_hcrd_os_ob_reads_config.nof_p_hdr =
426 AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT;
430 case AL_PCIE_OPERATING_MODE_EP:
431 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
432 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
433 AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT;
434 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
435 AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT;
436 ib_hcrd_os_ob_reads_config.nof_np_hdr =
437 AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT;
438 ib_hcrd_os_ob_reads_config.nof_p_hdr =
439 AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT;
441 ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
442 AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT;
443 ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
444 AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT;
445 ib_hcrd_os_ob_reads_config.nof_np_hdr =
446 AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT;
447 ib_hcrd_os_ob_reads_config.nof_p_hdr =
448 AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT;
453 al_err("PCIe %d: outstanding outbound transactions could not be configured - unknown operating mode\n",
458 al_pcie_port_ib_hcrd_os_ob_reads_config(pcie_port, &ib_hcrd_os_ob_reads_config);
461 /** return AL_TRUE if link is up, AL_FALSE otherwise */
464 struct al_pcie_port *pcie_port,
467 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
471 info_0 = al_reg_read32(®s->app.debug->info_0);
473 ltssm_state = AL_REG_FIELD_GET(info_0,
474 PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK,
475 PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT);
477 al_dbg("PCIe %d: Port Debug 0: 0x%08x. LTSSM state :0x%x\n",
478 pcie_port->port_id, info_0, ltssm_state);
481 *ltssm_ret = ltssm_state;
483 if ((ltssm_state == AL_PCIE_LTSSM_STATE_L0) ||
484 (ltssm_state == AL_PCIE_LTSSM_STATE_L0S))
490 al_pcie_port_gen2_params_config(struct al_pcie_port *pcie_port,
491 const struct al_pcie_gen2_params *gen2_params)
493 struct al_pcie_regs *regs = pcie_port->regs;
496 al_dbg("PCIe %d: Gen2 params config: Tx Swing %s, interrupt on link Eq %s, set Deemphasis %s\n",
498 gen2_params->tx_swing_low ? "Low" : "Full",
499 gen2_params->tx_compliance_receive_enable? "enable" : "disable",
500 gen2_params->set_deemphasis? "enable" : "disable");
502 gen2_ctrl = al_reg_read32(®s->port_regs->gen2_ctrl);
504 if (gen2_params->tx_swing_low)
505 AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
507 AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
509 if (gen2_params->tx_compliance_receive_enable)
510 AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
512 AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
514 if (gen2_params->set_deemphasis)
515 AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
517 AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
519 al_reg_write32(®s->port_regs->gen2_ctrl, gen2_ctrl);
526 gen3_lane_eq_param_to_val(const struct al_pcie_gen3_lane_eq_params *eq_params)
528 uint16_t eq_control = 0;
530 eq_control = eq_params->downstream_port_transmitter_preset & 0xF;
531 eq_control |= (eq_params->downstream_port_receiver_preset_hint & 0x7) << 4;
532 eq_control |= (eq_params->upstream_port_transmitter_preset & 0xF) << 8;
533 eq_control |= (eq_params->upstream_port_receiver_preset_hint & 0x7) << 12;
539 al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port,
540 const struct al_pcie_gen3_params *gen3_params)
542 struct al_pcie_regs *regs = pcie_port->regs;
544 uint16_t __iomem *lanes_eq_base = (uint16_t __iomem *)(regs->core_space[0].pcie_sec_ext_cap_base + (0xC >> 2));
547 al_dbg("PCIe %d: Gen3 params config: Equalization %s, interrupt on link Eq %s\n",
549 gen3_params->perform_eq ? "enable" : "disable",
550 gen3_params->interrupt_enable_on_link_eq_request? "enable" : "disable");
552 if (gen3_params->perform_eq)
553 AL_REG_BIT_SET(reg, 0);
554 if (gen3_params->interrupt_enable_on_link_eq_request)
555 AL_REG_BIT_SET(reg, 1);
557 al_reg_write32(regs->core_space[0].pcie_sec_ext_cap_base + (4 >> 2),
560 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
562 for (i = 0; i < gen3_params->eq_params_elements; i += 2) {
563 uint32_t eq_control =
564 (uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i) |
565 (uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i + 1) << 16;
567 al_dbg("PCIe %d: Set EQ (0x%08x) for lane %d, %d\n", pcie_port->port_id, eq_control, i, i + 1);
568 al_reg_write32((uint32_t *)(lanes_eq_base + i), eq_control);
571 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
573 reg = al_reg_read32(®s->port_regs->gen3_ctrl);
574 if (gen3_params->eq_disable)
575 AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
577 AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
579 if (gen3_params->eq_phase2_3_disable)
580 AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
582 AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
584 al_reg_write32(®s->port_regs->gen3_ctrl, reg);
587 AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_LF_MASK,
588 PCIE_PORT_GEN3_EQ_LF_SHIFT,
589 gen3_params->local_lf);
590 AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_FS_MASK,
591 PCIE_PORT_GEN3_EQ_FS_SHIFT,
592 gen3_params->local_fs);
594 al_reg_write32(®s->port_regs->gen3_eq_fs_lf, reg);
597 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK,
598 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT,
599 gen3_params->local_lf);
600 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK,
601 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT,
602 gen3_params->local_fs);
603 al_reg_write32(regs->axi.conf.zero_lane0, reg);
604 al_reg_write32(regs->axi.conf.zero_lane1, reg);
605 al_reg_write32(regs->axi.conf.zero_lane2, reg);
606 al_reg_write32(regs->axi.conf.zero_lane3, reg);
607 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
608 al_reg_write32(regs->axi.conf.zero_lane4, reg);
609 al_reg_write32(regs->axi.conf.zero_lane5, reg);
610 al_reg_write32(regs->axi.conf.zero_lane6, reg);
611 al_reg_write32(regs->axi.conf.zero_lane7, reg);
615 * Gen3 EQ Control Register:
616 * - Preset Request Vector - request 9
617 * - Behavior After 24 ms Timeout (when optimal settings are not
618 * found): Recovery.Equalization.RcvrLock
619 * - Phase2_3 2 ms Timeout Disable
620 * - Feedback Mode - Figure Of Merit
623 al_reg_write32(®s->port_regs->gen3_eq_ctrl, reg);
629 al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
630 const struct al_pcie_pf_config_params *pf_params)
632 struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
633 struct al_pcie_regs *regs = pcie_port->regs;
634 unsigned int pf_num = pcie_pf->pf_num;
638 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
640 /* Disable D1 and D3hot capabilities */
641 if (pf_params->cap_d1_d3hot_dis)
642 al_reg_write32_masked(
643 regs->core_space[pf_num].pcie_pm_cap_base,
644 AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0);
646 /* Set/Clear FLR bit */
647 if (pf_params->cap_flr_dis)
648 al_reg_write32_masked(
649 regs->core_space[pf_num].pcie_dev_cap_base,
650 AL_PCI_EXP_DEVCAP_FLR, 0);
652 al_reg_write32_masked(
653 regs->core_space[pcie_pf->pf_num].pcie_dev_cap_base,
654 AL_PCI_EXP_DEVCAP_FLR, AL_PCI_EXP_DEVCAP_FLR);
656 /* Disable ASPM capability */
657 if (pf_params->cap_aspm_dis) {
658 al_reg_write32_masked(
659 regs->core_space[pf_num].pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2),
660 AL_PCI_EXP_LNKCAP_ASPMS, 0);
663 if (!pf_params->bar_params_valid) {
668 for (bar_idx = 0; bar_idx < 6;){ /* bar_idx will be incremented depending on bar type */
669 const struct al_pcie_ep_bar_params *params = pf_params->bar_params + bar_idx;
672 uint32_t __iomem *bar_addr = ®s->core_space[pf_num].config_header[(AL_PCI_BASE_ADDRESS_0 >> 2) + bar_idx];
674 if (params->enable) {
675 uint64_t size = params->size;
677 if (params->memory_64_bit) {
678 const struct al_pcie_ep_bar_params *next_params = params + 1;
679 /* 64 bars start at even index (BAR0, BAR 2 or BAR 4) */
685 /* next BAR must be disabled */
686 if (next_params->enable) {
691 /* 64 bar must be memory bar */
692 if (!params->memory_space) {
697 if (size > AL_PCIE_MAX_32_MEMORY_BAR_SIZE)
699 /* 32 bit space can't be prefetchable */
700 if (params->memory_is_prefetchable) {
706 if (params->memory_space) {
707 if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
708 al_err("PCIe %d: memory BAR %d: size (0x%jx) less that minimal allowed value\n",
709 pcie_port->port_id, bar_idx,
715 /* IO can't be prefetchable */
716 if (params->memory_is_prefetchable) {
721 if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
722 al_err("PCIe %d: IO BAR %d: size (0x%jx) less that minimal allowed value\n",
723 pcie_port->port_id, bar_idx,
730 /* size must be power of 2 */
731 if (size & (size - 1)) {
732 al_err("PCIe %d: BAR %d:size (0x%jx) must be "
734 pcie_port->port_id, bar_idx, (uintmax_t)size);
739 /* If BAR is 64-bit, disable the next BAR before
740 * configuring this one
742 if (params->memory_64_bit)
743 al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, 0);
745 mask = 1; /* enable bit*/
746 mask |= (params->size - 1) & 0xFFFFFFFF;
748 al_reg_write32_dbi_cs2(pcie_port, bar_addr , mask);
750 if (params->memory_space == AL_FALSE)
751 ctrl = AL_PCI_BASE_ADDRESS_SPACE_IO;
752 if (params->memory_64_bit)
753 ctrl |= AL_PCI_BASE_ADDRESS_MEM_TYPE_64;
754 if (params->memory_is_prefetchable)
755 ctrl |= AL_PCI_BASE_ADDRESS_MEM_PREFETCH;
756 al_reg_write32(bar_addr, ctrl);
758 if (params->memory_64_bit) {
759 mask = ((params->size - 1) >> 32) & 0xFFFFFFFF;
760 al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, mask);
764 al_reg_write32_dbi_cs2(pcie_port, bar_addr , mask);
766 if (params->enable && params->memory_64_bit)
772 if (pf_params->exp_bar_params.enable) {
773 if (pcie_port->rev_id != AL_PCIE_REV_ID_3) {
774 al_err("PCIe %d: Expansion BAR enable not supported\n", pcie_port->port_id);
779 uint32_t __iomem *exp_rom_bar_addr =
780 ®s->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
781 uint32_t mask = 1; /* enable bit*/
782 mask |= (pf_params->exp_bar_params.size - 1) & 0xFFFFFFFF;
783 al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr , mask);
785 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
786 /* Disable exp ROM */
787 uint32_t __iomem *exp_rom_bar_addr =
788 ®s->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
789 al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr , 0);
792 /* Open CPU generated msi and legacy interrupts in pcie wrapper logic */
793 if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
794 al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_0, (1 << 21));
795 } else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
796 (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
797 al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_3, (1 << 18));
805 * Addressing RMN: 1547
808 * 1. Whenever writing to 0x2xx offset, the write also happens to
809 * 0x3xx address, meaning two registers are written instead of one.
810 * 2. Read and write from 0x3xx work ok.
813 * Backup the value of the app.int_grp_a.mask_a register, because
814 * app.int_grp_a.mask_clear_a gets overwritten during the write to
815 * app.soc.mask_msi_leg_0 register.
816 * Restore the original value after the write to app.soc.mask_msi_leg_0
819 if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
820 al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
821 } else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
822 (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
823 al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_3, (1 << 19));
833 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
839 al_pcie_port_sris_config(
840 struct al_pcie_port *pcie_port,
841 struct al_pcie_sris_params *sris_params,
842 enum al_pcie_link_speed link_speed)
845 struct al_pcie_regs *regs = pcie_port->regs;
847 if (sris_params->use_defaults) {
848 sris_params->kp_counter_gen3 = (pcie_port->rev_id > AL_PCIE_REV_ID_1) ?
849 PCIE_SRIS_KP_COUNTER_GEN3_DEFAULT_VAL : 0;
850 sris_params->kp_counter_gen21 = PCIE_SRIS_KP_COUNTER_GEN21_DEFAULT_VAL;
852 al_dbg("PCIe %d: configuring SRIS with default values kp_gen3[%d] kp_gen21[%d]\n",
854 sris_params->kp_counter_gen3,
855 sris_params->kp_counter_gen21);
858 switch (pcie_port->rev_id) {
859 case AL_PCIE_REV_ID_3:
860 al_reg_write32_masked(®s->app.cfg_func_ext->cfg,
861 PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE,
862 PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE);
863 case AL_PCIE_REV_ID_2:
864 al_reg_write32_masked(regs->app.global_ctrl.sris_kp_counter,
865 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_MASK |
866 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_MASK |
867 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN,
868 (sris_params->kp_counter_gen3 <<
869 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_SHIFT) |
870 (sris_params->kp_counter_gen21 <<
871 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_SHIFT) |
872 PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN);
875 case AL_PCIE_REV_ID_1:
876 if ((link_speed == AL_PCIE_LINK_SPEED_GEN3) && (sris_params->kp_counter_gen3)) {
877 al_err("PCIe %d: cannot config Gen%d SRIS with rev_id[%d]\n",
878 pcie_port->port_id, al_pcie_speed_gen_code(link_speed),
883 al_reg_write32_masked(®s->port_regs->filter_mask_reg_1,
884 PCIE_FLT_MASK_SKP_INT_VAL_MASK,
885 sris_params->kp_counter_gen21);
889 al_err("PCIe %d: SRIS config is not supported in rev_id[%d]\n",
890 pcie_port->port_id, pcie_port->rev_id);
899 al_pcie_port_ib_hcrd_config(struct al_pcie_port *pcie_port)
901 struct al_pcie_regs *regs = pcie_port->regs;
903 al_reg_write32_masked(
904 ®s->port_regs->vc0_posted_rcv_q_ctrl,
905 RADM_PQ_HCRD_VC0_MASK,
906 (pcie_port->ib_hcrd_config.nof_p_hdr - 1)
907 << RADM_PQ_HCRD_VC0_SHIFT);
909 al_reg_write32_masked(
910 ®s->port_regs->vc0_non_posted_rcv_q_ctrl,
911 RADM_NPQ_HCRD_VC0_MASK,
912 (pcie_port->ib_hcrd_config.nof_np_hdr - 1)
913 << RADM_NPQ_HCRD_VC0_SHIFT);
917 al_pcie_port_max_num_of_pfs_get(struct al_pcie_port *pcie_port)
919 struct al_pcie_regs *regs = pcie_port->regs;
920 uint32_t max_func_num;
921 uint32_t max_num_of_pfs;
924 * Only in REV3, when port is already enabled, max_num_of_pfs is already
925 * initialized, return it. Otherwise, return default: 1 PF
927 if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
928 && al_pcie_port_is_enabled(pcie_port)) {
929 max_func_num = al_reg_read32(®s->port_regs->timer_ctrl_max_func_num);
930 max_num_of_pfs = AL_REG_FIELD_GET(max_func_num, PCIE_PORT_GEN3_MAX_FUNC_NUM, 0) + 1;
931 return max_num_of_pfs;
936 /** Enable ecrc generation in outbound atu (Addressing RMN: 5119) */
937 static void al_pcie_ecrc_gen_ob_atu_enable(struct al_pcie_port *pcie_port, unsigned int pf_num)
939 struct al_pcie_regs *regs = pcie_port->regs;
940 int max_ob_atu = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
941 AL_PCIE_REV_3_ATU_NUM_OUTBOUND_REGIONS : AL_PCIE_REV_1_2_ATU_NUM_OUTBOUND_REGIONS;
943 for (i = 0; i < max_ob_atu; i++) {
946 unsigned int func_num;
947 AL_REG_FIELD_SET(reg, 0xF, 0, i);
948 AL_REG_BIT_VAL_SET(reg, 31, AL_PCIE_ATU_DIR_OUTBOUND);
949 al_reg_write32(®s->port_regs->iatu.index, reg);
950 reg = al_reg_read32(®s->port_regs->iatu.cr2);
951 enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;
952 reg = al_reg_read32(®s->port_regs->iatu.cr1);
953 func_num = AL_REG_FIELD_GET(reg,
954 PCIE_IATU_CR1_FUNC_NUM_MASK,
955 PCIE_IATU_CR1_FUNC_NUM_SHIFT);
956 if ((enable == AL_TRUE) && (pf_num == func_num)) {
958 AL_REG_BIT_SET(reg, 8);
959 al_reg_write32(®s->port_regs->iatu.cr1, reg);
964 /******************************************************************************/
965 /***************************** API Implementation *****************************/
966 /******************************************************************************/
968 /*************************** PCIe Initialization API **************************/
971 * Initializes a PCIe port handle structure
972 * Caution: this function should not read/write to any register except for
973 * reading RO register (REV_ID for example)
976 al_pcie_port_handle_init(
977 struct al_pcie_port *pcie_port,
978 void __iomem *pcie_reg_base,
979 void __iomem *pbs_reg_base,
980 unsigned int port_id)
984 pcie_port->pcie_reg_base = pcie_reg_base;
985 pcie_port->regs = &pcie_port->regs_ptrs;
986 pcie_port->ex_regs = NULL;
987 pcie_port->pbs_regs = pbs_reg_base;
988 pcie_port->port_id = port_id;
989 pcie_port->max_lanes = 0;
991 ret = al_pcie_rev_id_get(pbs_reg_base, pcie_reg_base);
995 pcie_port->rev_id = ret;
998 al_memset(pcie_port->regs, 0, sizeof(struct al_pcie_regs));
1000 if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
1001 struct al_pcie_rev1_regs __iomem *regs =
1002 (struct al_pcie_rev1_regs __iomem *)pcie_reg_base;
1004 pcie_port->regs->axi.ctrl.global = ®s->axi.ctrl.global;
1005 pcie_port->regs->axi.ctrl.master_rctl = ®s->axi.ctrl.master_rctl;
1006 pcie_port->regs->axi.ctrl.master_ctl = ®s->axi.ctrl.master_ctl;
1007 pcie_port->regs->axi.ctrl.master_arctl = ®s->axi.ctrl.master_arctl;
1008 pcie_port->regs->axi.ctrl.master_awctl = ®s->axi.ctrl.master_awctl;
1009 pcie_port->regs->axi.ctrl.slv_ctl = ®s->axi.ctrl.slv_ctl;
1010 pcie_port->regs->axi.ob_ctrl.cfg_target_bus = ®s->axi.ob_ctrl.cfg_target_bus;
1011 pcie_port->regs->axi.ob_ctrl.cfg_control = ®s->axi.ob_ctrl.cfg_control;
1012 pcie_port->regs->axi.ob_ctrl.io_start_l = ®s->axi.ob_ctrl.io_start_l;
1013 pcie_port->regs->axi.ob_ctrl.io_start_h = ®s->axi.ob_ctrl.io_start_h;
1014 pcie_port->regs->axi.ob_ctrl.io_limit_l = ®s->axi.ob_ctrl.io_limit_l;
1015 pcie_port->regs->axi.ob_ctrl.io_limit_h = ®s->axi.ob_ctrl.io_limit_h;
1016 pcie_port->regs->axi.pcie_global.conf = ®s->axi.pcie_global.conf;
1017 pcie_port->regs->axi.conf.zero_lane0 = ®s->axi.conf.zero_lane0;
1018 pcie_port->regs->axi.conf.zero_lane1 = ®s->axi.conf.zero_lane1;
1019 pcie_port->regs->axi.conf.zero_lane2 = ®s->axi.conf.zero_lane2;
1020 pcie_port->regs->axi.conf.zero_lane3 = ®s->axi.conf.zero_lane3;
1021 pcie_port->regs->axi.status.lane[0] = ®s->axi.status.lane0;
1022 pcie_port->regs->axi.status.lane[1] = ®s->axi.status.lane1;
1023 pcie_port->regs->axi.status.lane[2] = ®s->axi.status.lane2;
1024 pcie_port->regs->axi.status.lane[3] = ®s->axi.status.lane3;
1025 pcie_port->regs->axi.parity.en_axi = ®s->axi.parity.en_axi;
1026 pcie_port->regs->axi.ordering.pos_cntl = ®s->axi.ordering.pos_cntl;
1027 pcie_port->regs->axi.pre_configuration.pcie_core_setup = ®s->axi.pre_configuration.pcie_core_setup;
1028 pcie_port->regs->axi.init_fc.cfg = ®s->axi.init_fc.cfg;
1029 pcie_port->regs->axi.int_grp_a = ®s->axi.int_grp_a;
1031 pcie_port->regs->app.global_ctrl.port_init = ®s->app.global_ctrl.port_init;
1032 pcie_port->regs->app.global_ctrl.pm_control = ®s->app.global_ctrl.pm_control;
1033 pcie_port->regs->app.global_ctrl.events_gen[0] = ®s->app.global_ctrl.events_gen;
1034 pcie_port->regs->app.debug = ®s->app.debug;
1035 pcie_port->regs->app.soc_int[0].status_0 = ®s->app.soc_int.status_0;
1036 pcie_port->regs->app.soc_int[0].status_1 = ®s->app.soc_int.status_1;
1037 pcie_port->regs->app.soc_int[0].status_2 = ®s->app.soc_int.status_2;
1038 pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = ®s->app.soc_int.mask_inta_leg_0;
1039 pcie_port->regs->app.soc_int[0].mask_inta_leg_1 = ®s->app.soc_int.mask_inta_leg_1;
1040 pcie_port->regs->app.soc_int[0].mask_inta_leg_2 = ®s->app.soc_int.mask_inta_leg_2;
1041 pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = ®s->app.soc_int.mask_msi_leg_0;
1042 pcie_port->regs->app.soc_int[0].mask_msi_leg_1 = ®s->app.soc_int.mask_msi_leg_1;
1043 pcie_port->regs->app.soc_int[0].mask_msi_leg_2 = ®s->app.soc_int.mask_msi_leg_2;
1044 pcie_port->regs->app.ctrl_gen = ®s->app.ctrl_gen;
1045 pcie_port->regs->app.parity = ®s->app.parity;
1046 pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
1047 pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
1048 pcie_port->regs->app.int_grp_a = ®s->app.int_grp_a;
1049 pcie_port->regs->app.int_grp_b = ®s->app.int_grp_b;
1051 pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
1052 pcie_port->regs->core_space[0].pcie_pm_cap_base = ®s->core_space.pcie_pm_cap_base;
1053 pcie_port->regs->core_space[0].pcie_cap_base = ®s->core_space.pcie_cap_base;
1054 pcie_port->regs->core_space[0].pcie_dev_cap_base = ®s->core_space.pcie_dev_cap_base;
1055 pcie_port->regs->core_space[0].pcie_dev_ctrl_status = ®s->core_space.pcie_dev_ctrl_status;
1056 pcie_port->regs->core_space[0].pcie_link_cap_base = ®s->core_space.pcie_link_cap_base;
1057 pcie_port->regs->core_space[0].msix_cap_base = ®s->core_space.msix_cap_base;
1058 pcie_port->regs->core_space[0].aer = ®s->core_space.aer;
1059 pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = ®s->core_space.pcie_sec_ext_cap_base;
1061 pcie_port->regs->port_regs = ®s->core_space.port_regs;
1063 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_2) {
1064 struct al_pcie_rev2_regs __iomem *regs =
1065 (struct al_pcie_rev2_regs __iomem *)pcie_reg_base;
1067 pcie_port->regs->axi.ctrl.global = ®s->axi.ctrl.global;
1068 pcie_port->regs->axi.ctrl.master_rctl = ®s->axi.ctrl.master_rctl;
1069 pcie_port->regs->axi.ctrl.master_ctl = ®s->axi.ctrl.master_ctl;
1070 pcie_port->regs->axi.ctrl.master_arctl = ®s->axi.ctrl.master_arctl;
1071 pcie_port->regs->axi.ctrl.master_awctl = ®s->axi.ctrl.master_awctl;
1072 pcie_port->regs->axi.ctrl.slv_ctl = ®s->axi.ctrl.slv_ctl;
1073 pcie_port->regs->axi.ob_ctrl.cfg_target_bus = ®s->axi.ob_ctrl.cfg_target_bus;
1074 pcie_port->regs->axi.ob_ctrl.cfg_control = ®s->axi.ob_ctrl.cfg_control;
1075 pcie_port->regs->axi.ob_ctrl.io_start_l = ®s->axi.ob_ctrl.io_start_l;
1076 pcie_port->regs->axi.ob_ctrl.io_start_h = ®s->axi.ob_ctrl.io_start_h;
1077 pcie_port->regs->axi.ob_ctrl.io_limit_l = ®s->axi.ob_ctrl.io_limit_l;
1078 pcie_port->regs->axi.ob_ctrl.io_limit_h = ®s->axi.ob_ctrl.io_limit_h;
1079 pcie_port->regs->axi.ob_ctrl.tgtid_reg_ovrd = ®s->axi.ob_ctrl.tgtid_reg_ovrd;
1080 pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_sel = ®s->axi.ob_ctrl.addr_high_reg_ovrd_sel;
1081 pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_value = ®s->axi.ob_ctrl.addr_high_reg_ovrd_value;
1082 pcie_port->regs->axi.ob_ctrl.addr_size_replace = ®s->axi.ob_ctrl.addr_size_replace;
1083 pcie_port->regs->axi.pcie_global.conf = ®s->axi.pcie_global.conf;
1084 pcie_port->regs->axi.conf.zero_lane0 = ®s->axi.conf.zero_lane0;
1085 pcie_port->regs->axi.conf.zero_lane1 = ®s->axi.conf.zero_lane1;
1086 pcie_port->regs->axi.conf.zero_lane2 = ®s->axi.conf.zero_lane2;
1087 pcie_port->regs->axi.conf.zero_lane3 = ®s->axi.conf.zero_lane3;
1088 pcie_port->regs->axi.status.lane[0] = ®s->axi.status.lane0;
1089 pcie_port->regs->axi.status.lane[1] = ®s->axi.status.lane1;
1090 pcie_port->regs->axi.status.lane[2] = ®s->axi.status.lane2;
1091 pcie_port->regs->axi.status.lane[3] = ®s->axi.status.lane3;
1092 pcie_port->regs->axi.parity.en_axi = ®s->axi.parity.en_axi;
1093 pcie_port->regs->axi.ordering.pos_cntl = ®s->axi.ordering.pos_cntl;
1094 pcie_port->regs->axi.pre_configuration.pcie_core_setup = ®s->axi.pre_configuration.pcie_core_setup;
1095 pcie_port->regs->axi.init_fc.cfg = ®s->axi.init_fc.cfg;
1096 pcie_port->regs->axi.int_grp_a = ®s->axi.int_grp_a;
1098 pcie_port->regs->app.global_ctrl.port_init = ®s->app.global_ctrl.port_init;
1099 pcie_port->regs->app.global_ctrl.pm_control = ®s->app.global_ctrl.pm_control;
1100 pcie_port->regs->app.global_ctrl.events_gen[0] = ®s->app.global_ctrl.events_gen;
1101 pcie_port->regs->app.global_ctrl.corr_err_sts_int = ®s->app.global_ctrl.pended_corr_err_sts_int;
1102 pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = ®s->app.global_ctrl.pended_uncorr_err_sts_int;
1103 pcie_port->regs->app.global_ctrl.sris_kp_counter = ®s->app.global_ctrl.sris_kp_counter_value;
1104 pcie_port->regs->app.debug = ®s->app.debug;
1105 pcie_port->regs->app.ap_user_send_msg = ®s->app.ap_user_send_msg;
1106 pcie_port->regs->app.soc_int[0].status_0 = ®s->app.soc_int.status_0;
1107 pcie_port->regs->app.soc_int[0].status_1 = ®s->app.soc_int.status_1;
1108 pcie_port->regs->app.soc_int[0].status_2 = ®s->app.soc_int.status_2;
1109 pcie_port->regs->app.soc_int[0].status_3 = ®s->app.soc_int.status_3;
1110 pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = ®s->app.soc_int.mask_inta_leg_0;
1111 pcie_port->regs->app.soc_int[0].mask_inta_leg_1 = ®s->app.soc_int.mask_inta_leg_1;
1112 pcie_port->regs->app.soc_int[0].mask_inta_leg_2 = ®s->app.soc_int.mask_inta_leg_2;
1113 pcie_port->regs->app.soc_int[0].mask_inta_leg_3 = ®s->app.soc_int.mask_inta_leg_3;
1114 pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = ®s->app.soc_int.mask_msi_leg_0;
1115 pcie_port->regs->app.soc_int[0].mask_msi_leg_1 = ®s->app.soc_int.mask_msi_leg_1;
1116 pcie_port->regs->app.soc_int[0].mask_msi_leg_2 = ®s->app.soc_int.mask_msi_leg_2;
1117 pcie_port->regs->app.soc_int[0].mask_msi_leg_3 = ®s->app.soc_int.mask_msi_leg_3;
1118 pcie_port->regs->app.ctrl_gen = ®s->app.ctrl_gen;
1119 pcie_port->regs->app.parity = ®s->app.parity;
1120 pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
1121 pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
1122 pcie_port->regs->app.status_per_func[0] = ®s->app.status_per_func;
1123 pcie_port->regs->app.int_grp_a = ®s->app.int_grp_a;
1124 pcie_port->regs->app.int_grp_b = ®s->app.int_grp_b;
1126 pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
1127 pcie_port->regs->core_space[0].pcie_pm_cap_base = ®s->core_space.pcie_pm_cap_base;
1128 pcie_port->regs->core_space[0].pcie_cap_base = ®s->core_space.pcie_cap_base;
1129 pcie_port->regs->core_space[0].pcie_dev_cap_base = ®s->core_space.pcie_dev_cap_base;
1130 pcie_port->regs->core_space[0].pcie_dev_ctrl_status = ®s->core_space.pcie_dev_ctrl_status;
1131 pcie_port->regs->core_space[0].pcie_link_cap_base = ®s->core_space.pcie_link_cap_base;
1132 pcie_port->regs->core_space[0].msix_cap_base = ®s->core_space.msix_cap_base;
1133 pcie_port->regs->core_space[0].aer = ®s->core_space.aer;
1134 pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = ®s->core_space.pcie_sec_ext_cap_base;
1136 pcie_port->regs->port_regs = ®s->core_space.port_regs;
1138 } else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1139 struct al_pcie_rev3_regs __iomem *regs =
1140 (struct al_pcie_rev3_regs __iomem *)pcie_reg_base;
1141 pcie_port->regs->axi.ctrl.global = ®s->axi.ctrl.global;
1142 pcie_port->regs->axi.ctrl.master_rctl = ®s->axi.ctrl.master_rctl;
1143 pcie_port->regs->axi.ctrl.master_ctl = ®s->axi.ctrl.master_ctl;
1144 pcie_port->regs->axi.ctrl.master_arctl = ®s->axi.ctrl.master_arctl;
1145 pcie_port->regs->axi.ctrl.master_awctl = ®s->axi.ctrl.master_awctl;
1146 pcie_port->regs->axi.ctrl.slv_ctl = ®s->axi.ctrl.slv_ctl;
1147 pcie_port->regs->axi.ob_ctrl.cfg_target_bus = ®s->axi.ob_ctrl.cfg_target_bus;
1148 pcie_port->regs->axi.ob_ctrl.cfg_control = ®s->axi.ob_ctrl.cfg_control;
1149 pcie_port->regs->axi.ob_ctrl.io_start_l = ®s->axi.ob_ctrl.io_start_l;
1150 pcie_port->regs->axi.ob_ctrl.io_start_h = ®s->axi.ob_ctrl.io_start_h;
1151 pcie_port->regs->axi.ob_ctrl.io_limit_l = ®s->axi.ob_ctrl.io_limit_l;
1152 pcie_port->regs->axi.ob_ctrl.io_limit_h = ®s->axi.ob_ctrl.io_limit_h;
1153 pcie_port->regs->axi.ob_ctrl.io_addr_mask_h = ®s->axi.ob_ctrl.io_addr_mask_h;
1154 pcie_port->regs->axi.ob_ctrl.ar_msg_addr_mask_h = ®s->axi.ob_ctrl.ar_msg_addr_mask_h;
1155 pcie_port->regs->axi.ob_ctrl.aw_msg_addr_mask_h = ®s->axi.ob_ctrl.aw_msg_addr_mask_h;
1156 pcie_port->regs->axi.ob_ctrl.tgtid_reg_ovrd = ®s->axi.ob_ctrl.tgtid_reg_ovrd;
1157 pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_sel = ®s->axi.ob_ctrl.addr_high_reg_ovrd_sel;
1158 pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_value = ®s->axi.ob_ctrl.addr_high_reg_ovrd_value;
1159 pcie_port->regs->axi.ob_ctrl.addr_size_replace = ®s->axi.ob_ctrl.addr_size_replace;
1160 pcie_port->regs->axi.pcie_global.conf = ®s->axi.pcie_global.conf;
1161 pcie_port->regs->axi.conf.zero_lane0 = ®s->axi.conf.zero_lane0;
1162 pcie_port->regs->axi.conf.zero_lane1 = ®s->axi.conf.zero_lane1;
1163 pcie_port->regs->axi.conf.zero_lane2 = ®s->axi.conf.zero_lane2;
1164 pcie_port->regs->axi.conf.zero_lane3 = ®s->axi.conf.zero_lane3;
1165 pcie_port->regs->axi.conf.zero_lane4 = ®s->axi.conf.zero_lane4;
1166 pcie_port->regs->axi.conf.zero_lane5 = ®s->axi.conf.zero_lane5;
1167 pcie_port->regs->axi.conf.zero_lane6 = ®s->axi.conf.zero_lane6;
1168 pcie_port->regs->axi.conf.zero_lane7 = ®s->axi.conf.zero_lane7;
1169 pcie_port->regs->axi.status.lane[0] = ®s->axi.status.lane0;
1170 pcie_port->regs->axi.status.lane[1] = ®s->axi.status.lane1;
1171 pcie_port->regs->axi.status.lane[2] = ®s->axi.status.lane2;
1172 pcie_port->regs->axi.status.lane[3] = ®s->axi.status.lane3;
1173 pcie_port->regs->axi.status.lane[4] = ®s->axi.status.lane4;
1174 pcie_port->regs->axi.status.lane[5] = ®s->axi.status.lane5;
1175 pcie_port->regs->axi.status.lane[6] = ®s->axi.status.lane6;
1176 pcie_port->regs->axi.status.lane[7] = ®s->axi.status.lane7;
1177 pcie_port->regs->axi.parity.en_axi = ®s->axi.parity.en_axi;
1178 pcie_port->regs->axi.ordering.pos_cntl = ®s->axi.ordering.pos_cntl;
1179 pcie_port->regs->axi.pre_configuration.pcie_core_setup = ®s->axi.pre_configuration.pcie_core_setup;
1180 pcie_port->regs->axi.init_fc.cfg = ®s->axi.init_fc.cfg;
1181 pcie_port->regs->axi.int_grp_a = ®s->axi.int_grp_a;
1182 pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_0 = ®s->axi.axi_attr_ovrd.write_msg_ctrl_0;
1183 pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_1 = ®s->axi.axi_attr_ovrd.write_msg_ctrl_1;
1184 pcie_port->regs->axi.axi_attr_ovrd.pf_sel = ®s->axi.axi_attr_ovrd.pf_sel;
1186 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1187 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_0 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_0;
1188 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_1 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_1;
1189 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_2 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_2;
1190 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_3 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_3;
1191 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_4 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_4;
1192 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_5 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_5;
1193 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_6 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_6;
1194 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_7 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_7;
1195 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_8 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_8;
1196 pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_9 = ®s->axi.pf_axi_attr_ovrd[i].func_ctrl_9;
1199 pcie_port->regs->axi.msg_attr_axuser_table.entry_vec = ®s->axi.msg_attr_axuser_table.entry_vec;
1201 pcie_port->regs->app.global_ctrl.port_init = ®s->app.global_ctrl.port_init;
1202 pcie_port->regs->app.global_ctrl.pm_control = ®s->app.global_ctrl.pm_control;
1203 pcie_port->regs->app.global_ctrl.corr_err_sts_int = ®s->app.global_ctrl.pended_corr_err_sts_int;
1204 pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = ®s->app.global_ctrl.pended_uncorr_err_sts_int;
1206 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1207 pcie_port->regs->app.global_ctrl.events_gen[i] = ®s->app.events_gen_per_func[i].events_gen;
1210 pcie_port->regs->app.global_ctrl.sris_kp_counter = ®s->app.global_ctrl.sris_kp_counter_value;
1211 pcie_port->regs->app.debug = ®s->app.debug;
1213 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1214 pcie_port->regs->app.soc_int[i].status_0 = ®s->app.soc_int_per_func[i].status_0;
1215 pcie_port->regs->app.soc_int[i].status_1 = ®s->app.soc_int_per_func[i].status_1;
1216 pcie_port->regs->app.soc_int[i].status_2 = ®s->app.soc_int_per_func[i].status_2;
1217 pcie_port->regs->app.soc_int[i].status_3 = ®s->app.soc_int_per_func[i].status_3;
1218 pcie_port->regs->app.soc_int[i].mask_inta_leg_0 = ®s->app.soc_int_per_func[i].mask_inta_leg_0;
1219 pcie_port->regs->app.soc_int[i].mask_inta_leg_1 = ®s->app.soc_int_per_func[i].mask_inta_leg_1;
1220 pcie_port->regs->app.soc_int[i].mask_inta_leg_2 = ®s->app.soc_int_per_func[i].mask_inta_leg_2;
1221 pcie_port->regs->app.soc_int[i].mask_inta_leg_3 = ®s->app.soc_int_per_func[i].mask_inta_leg_3;
1222 pcie_port->regs->app.soc_int[i].mask_msi_leg_0 = ®s->app.soc_int_per_func[i].mask_msi_leg_0;
1223 pcie_port->regs->app.soc_int[i].mask_msi_leg_1 = ®s->app.soc_int_per_func[i].mask_msi_leg_1;
1224 pcie_port->regs->app.soc_int[i].mask_msi_leg_2 = ®s->app.soc_int_per_func[i].mask_msi_leg_2;
1225 pcie_port->regs->app.soc_int[i].mask_msi_leg_3 = ®s->app.soc_int_per_func[i].mask_msi_leg_3;
1228 pcie_port->regs->app.ap_user_send_msg = ®s->app.ap_user_send_msg;
1229 pcie_port->regs->app.ctrl_gen = ®s->app.ctrl_gen;
1230 pcie_port->regs->app.parity = ®s->app.parity;
1231 pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
1232 pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
1233 pcie_port->regs->app.cfg_func_ext = ®s->app.cfg_func_ext;
1235 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++)
1236 pcie_port->regs->app.status_per_func[i] = ®s->app.status_per_func[i];
1238 pcie_port->regs->app.int_grp_a = ®s->app.int_grp_a;
1239 pcie_port->regs->app.int_grp_b = ®s->app.int_grp_b;
1240 pcie_port->regs->app.int_grp_c = ®s->app.int_grp_c;
1241 pcie_port->regs->app.int_grp_d = ®s->app.int_grp_d;
1243 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1244 pcie_port->regs->core_space[i].config_header = regs->core_space.func[i].config_header;
1245 pcie_port->regs->core_space[i].pcie_pm_cap_base = ®s->core_space.func[i].pcie_pm_cap_base;
1246 pcie_port->regs->core_space[i].pcie_cap_base = ®s->core_space.func[i].pcie_cap_base;
1247 pcie_port->regs->core_space[i].pcie_dev_cap_base = ®s->core_space.func[i].pcie_dev_cap_base;
1248 pcie_port->regs->core_space[i].pcie_dev_ctrl_status = ®s->core_space.func[i].pcie_dev_ctrl_status;
1249 pcie_port->regs->core_space[i].pcie_link_cap_base = ®s->core_space.func[i].pcie_link_cap_base;
1250 pcie_port->regs->core_space[i].msix_cap_base = ®s->core_space.func[i].msix_cap_base;
1251 pcie_port->regs->core_space[i].aer = ®s->core_space.func[i].aer;
1252 pcie_port->regs->core_space[i].tph_cap_base = ®s->core_space.func[i].tph_cap_base;
1256 /* secondary extension capability only for PF0 */
1257 pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = ®s->core_space.func[0].pcie_sec_ext_cap_base;
1259 pcie_port->regs->port_regs = ®s->core_space.func[0].port_regs;
1262 al_warn("%s: Revision ID is unknown\n",
1267 /* set maximum number of physical functions */
1268 pcie_port->max_num_of_pfs = al_pcie_port_max_num_of_pfs_get(pcie_port);
1270 /* Clear 'nof_p_hdr' & 'nof_np_hdr' to later know if they where changed by the user */
1271 pcie_port->ib_hcrd_config.nof_np_hdr = 0;
1272 pcie_port->ib_hcrd_config.nof_p_hdr = 0;
1274 al_dbg("pcie port handle initialized. port id: %d, rev_id %d, regs base %p\n",
1275 port_id, pcie_port->rev_id, pcie_reg_base);
1280 * Initializes a PCIe Physical function handle structure
1281 * Caution: this function should not read/write to any register except for
1282 * reading RO register (REV_ID for example)
1285 al_pcie_pf_handle_init(
1286 struct al_pcie_pf *pcie_pf,
1287 struct al_pcie_port *pcie_port,
1288 unsigned int pf_num)
1290 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
1291 al_assert(pf_num < pcie_port->max_num_of_pfs);
1293 if (op_mode != AL_PCIE_OPERATING_MODE_EP) {
1294 al_err("PCIe %d: can't init PF handle with operating mode [%d]\n",
1295 pcie_port->port_id, op_mode);
1299 pcie_pf->pf_num = pf_num;
1300 pcie_pf->pcie_port = pcie_port;
1302 al_dbg("PCIe %d: pf handle initialized. pf number: %d, rev_id %d, regs %p\n",
1303 pcie_port->port_id, pcie_pf->pf_num, pcie_port->rev_id,
1308 /** Get port revision ID */
1309 int al_pcie_port_rev_id_get(struct al_pcie_port *pcie_port)
1311 return pcie_port->rev_id;
1314 /************************** Pre PCIe Port Enable API **************************/
1316 /** configure pcie operating mode (root complex or endpoint) */
1318 al_pcie_port_operating_mode_config(
1319 struct al_pcie_port *pcie_port,
1320 enum al_pcie_operating_mode mode)
1322 struct al_pcie_regs *regs = pcie_port->regs;
1323 uint32_t reg, device_type, new_device_type;
1325 if (al_pcie_port_is_enabled(pcie_port)) {
1326 al_err("PCIe %d: already enabled, cannot set operating mode\n",
1327 pcie_port->port_id);
1331 reg = al_reg_read32(regs->axi.pcie_global.conf);
1333 device_type = AL_REG_FIELD_GET(reg,
1334 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1335 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
1336 if (mode == AL_PCIE_OPERATING_MODE_EP) {
1337 new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP;
1338 } else if (mode == AL_PCIE_OPERATING_MODE_RC) {
1339 new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC;
1341 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1342 /* config 1 PF in RC mode */
1343 al_reg_write32_masked(regs->axi.axi_attr_ovrd.pf_sel,
1344 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_AXUSER |
1345 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
1346 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_ADDR_OFFSET_MASK |
1347 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT0_OVRD |
1348 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_AXUSER |
1349 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG |
1350 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_ADDR_OFFSET_MASK |
1351 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT1_OVRD,
1352 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
1353 PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG);
1356 al_err("PCIe %d: unknown operating mode: %d\n", pcie_port->port_id, mode);
1360 if (new_device_type == device_type) {
1361 al_dbg("PCIe %d: operating mode already set to %s\n",
1362 pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
1363 "EndPoint" : "Root Complex");
1366 al_dbg("PCIe %d: set operating mode to %s\n",
1367 pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
1368 "EndPoint" : "Root Complex");
1369 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1370 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT,
1373 al_reg_write32(regs->axi.pcie_global.conf, reg);
1379 al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
1381 struct al_pcie_regs *regs = pcie_port->regs;
1382 uint32_t active_lanes_val;
1384 if (al_pcie_port_is_enabled(pcie_port)) {
1385 al_err("PCIe %d: already enabled, cannot set max lanes\n",
1386 pcie_port->port_id);
1390 /* convert to bitmask format (4 ->'b1111, 2 ->'b11, 1 -> 'b1) */
1391 active_lanes_val = AL_PCIE_PARSE_LANES(lanes);
1393 al_reg_write32_masked(regs->axi.pcie_global.conf,
1394 (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1395 PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
1396 PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
1399 pcie_port->max_lanes = lanes;
1404 al_pcie_port_max_num_of_pfs_set(
1405 struct al_pcie_port *pcie_port,
1406 uint8_t max_num_of_pfs)
1408 struct al_pcie_regs *regs = pcie_port->regs;
1410 if (pcie_port->rev_id == AL_PCIE_REV_ID_3)
1411 al_assert(max_num_of_pfs <= REV3_MAX_NUM_OF_PFS);
1413 al_assert(max_num_of_pfs == REV1_2_MAX_NUM_OF_PFS);
1415 pcie_port->max_num_of_pfs = max_num_of_pfs;
1417 if (al_pcie_port_is_enabled(pcie_port) && (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
1418 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
1420 al_bool is_multi_pf =
1421 ((op_mode == AL_PCIE_OPERATING_MODE_EP) && (pcie_port->max_num_of_pfs > 1));
1423 /* Set maximum physical function numbers */
1424 al_reg_write32_masked(
1425 ®s->port_regs->timer_ctrl_max_func_num,
1426 PCIE_PORT_GEN3_MAX_FUNC_NUM,
1427 pcie_port->max_num_of_pfs - 1);
1429 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1432 * in EP mode, when we have more than 1 PF we need to assert
1433 * multi-pf support so the host scan all PFs
1435 al_reg_write32_masked((uint32_t __iomem *)
1436 (®s->core_space[0].config_header[0] +
1437 (PCIE_BIST_HEADER_TYPE_BASE >> 2)),
1438 PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK,
1439 is_multi_pf ? PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK : 0);
1441 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1447 /* Inbound header credits and outstanding outbound reads configuration */
1449 al_pcie_port_ib_hcrd_os_ob_reads_config(
1450 struct al_pcie_port *pcie_port,
1451 struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config)
1453 struct al_pcie_regs *regs = pcie_port->regs;
1455 if (al_pcie_port_is_enabled(pcie_port)) {
1456 al_err("PCIe %d: already enabled, cannot configure IB credits and OB OS reads\n",
1457 pcie_port->port_id);
1461 al_assert(ib_hcrd_os_ob_reads_config->nof_np_hdr > 0);
1463 al_assert(ib_hcrd_os_ob_reads_config->nof_p_hdr > 0);
1465 al_assert(ib_hcrd_os_ob_reads_config->nof_cpl_hdr > 0);
1467 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1469 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
1470 ib_hcrd_os_ob_reads_config->nof_np_hdr +
1471 ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
1472 AL_PCIE_REV3_IB_HCRD_SUM);
1474 al_reg_write32_masked(
1475 regs->axi.init_fc.cfg,
1476 PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_MASK |
1477 PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_MASK |
1478 PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_MASK,
1479 (ib_hcrd_os_ob_reads_config->nof_p_hdr <<
1480 PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
1481 (ib_hcrd_os_ob_reads_config->nof_np_hdr <<
1482 PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
1483 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
1484 PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
1487 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
1488 ib_hcrd_os_ob_reads_config->nof_np_hdr +
1489 ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
1490 AL_PCIE_REV_1_2_IB_HCRD_SUM);
1492 al_reg_write32_masked(
1493 regs->axi.init_fc.cfg,
1494 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_MASK |
1495 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_MASK |
1496 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_MASK,
1497 (ib_hcrd_os_ob_reads_config->nof_p_hdr <<
1498 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
1499 (ib_hcrd_os_ob_reads_config->nof_np_hdr <<
1500 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
1501 (ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
1502 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
1505 al_reg_write32_masked(
1506 regs->axi.pre_configuration.pcie_core_setup,
1507 PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
1508 ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads <<
1509 PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);
1511 /* Store 'nof_p_hdr' and 'nof_np_hdr' to be set in the core later */
1512 pcie_port->ib_hcrd_config.nof_np_hdr =
1513 ib_hcrd_os_ob_reads_config->nof_np_hdr;
1514 pcie_port->ib_hcrd_config.nof_p_hdr =
1515 ib_hcrd_os_ob_reads_config->nof_p_hdr;
1520 enum al_pcie_operating_mode
1521 al_pcie_operating_mode_get(
1522 struct al_pcie_port *pcie_port)
1524 struct al_pcie_regs *regs = pcie_port->regs;
1525 uint32_t reg, device_type;
1527 al_assert(pcie_port);
1529 reg = al_reg_read32(regs->axi.pcie_global.conf);
1531 device_type = AL_REG_FIELD_GET(reg,
1532 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1533 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
1535 switch (device_type) {
1536 case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP:
1537 return AL_PCIE_OPERATING_MODE_EP;
1538 case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC:
1539 return AL_PCIE_OPERATING_MODE_RC;
1541 al_err("PCIe %d: unknown device type (%d) in global conf register.\n",
1542 pcie_port->port_id, device_type);
1544 return AL_PCIE_OPERATING_MODE_UNKNOWN;
1547 /* PCIe AXI quality of service configuration */
1548 void al_pcie_axi_qos_config(
1549 struct al_pcie_port *pcie_port,
1553 struct al_pcie_regs *regs = pcie_port->regs;
1555 al_assert(pcie_port);
1556 al_assert(arqos <= PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_VAL_MAX);
1557 al_assert(awqos <= PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_VAL_MAX);
1559 al_reg_write32_masked(
1560 regs->axi.ctrl.master_arctl,
1561 PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_MASK,
1562 arqos << PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_SHIFT);
1563 al_reg_write32_masked(
1564 regs->axi.ctrl.master_awctl,
1565 PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_MASK,
1566 awqos << PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_SHIFT);
1569 /**************************** PCIe Port Enable API ****************************/
1571 /** Enable PCIe port (deassert reset) */
1573 al_pcie_port_enable(struct al_pcie_port *pcie_port)
1575 struct al_pbs_regs *pbs_reg_base =
1576 (struct al_pbs_regs *)pcie_port->pbs_regs;
1577 struct al_pcie_regs *regs = pcie_port->regs;
1578 unsigned int port_id = pcie_port->port_id;
1580 /* pre-port-enable default functionality should be here */
1583 * Set inbound header credit and outstanding outbound reads defaults
1584 * if the port initiator doesn't set it.
1585 * Must be called before port enable (PCIE_EXIST)
1587 if ((pcie_port->ib_hcrd_config.nof_np_hdr == 0) ||
1588 (pcie_port->ib_hcrd_config.nof_p_hdr == 0))
1589 al_pcie_ib_hcrd_os_ob_reads_config_default(pcie_port);
1592 * Disable ATS capability
1593 * - must be done before core reset deasserted
1594 * - rev_id 0 - no effect, but no harm
1596 if ((pcie_port->rev_id == AL_PCIE_REV_ID_1) ||
1597 (pcie_port->rev_id == AL_PCIE_REV_ID_2)) {
1598 al_reg_write32_masked(
1599 regs->axi.ordering.pos_cntl,
1600 PCIE_AXI_CORE_SETUP_ATS_CAP_DIS,
1601 PCIE_AXI_CORE_SETUP_ATS_CAP_DIS);
1604 /* Deassert core reset */
1605 al_reg_write32_masked(
1606 &pbs_reg_base->unit.pcie_conf_1,
1607 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1608 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT));
1613 /** Disable PCIe port (assert reset) */
1615 al_pcie_port_disable(struct al_pcie_port *pcie_port)
1617 struct al_pbs_regs *pbs_reg_base =
1618 (struct al_pbs_regs *)pcie_port->pbs_regs;
1619 unsigned int port_id = pcie_port->port_id;
1621 if (!al_pcie_port_is_enabled(pcie_port)) {
1622 al_warn("PCIe %d: trying to disable a non-enabled port\n",
1623 pcie_port->port_id);
1626 /* Assert core reset */
1627 al_reg_write32_masked(
1628 &pbs_reg_base->unit.pcie_conf_1,
1629 1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1634 al_pcie_port_memory_shutdown_set(
1635 struct al_pcie_port *pcie_port,
1638 struct al_pcie_regs *regs = pcie_port->regs;
1639 uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1640 PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN :
1641 PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;
1643 if (!al_pcie_port_is_enabled(pcie_port)) {
1644 al_err("PCIe %d: not enabled, cannot shutdown memory\n",
1645 pcie_port->port_id);
1649 al_reg_write32_masked(regs->axi.pcie_global.conf,
1650 mask, enable == AL_TRUE ? mask : 0);
1656 al_pcie_port_is_enabled(struct al_pcie_port *pcie_port)
1658 struct al_pbs_regs *pbs_reg_base = (struct al_pbs_regs *)pcie_port->pbs_regs;
1659 uint32_t pcie_exist = al_reg_read32(&pbs_reg_base->unit.pcie_conf_1);
1661 uint32_t ports_enabled = AL_REG_FIELD_GET(pcie_exist,
1662 PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_MASK,
1663 PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT);
1665 return (AL_REG_FIELD_GET(ports_enabled, AL_BIT(pcie_port->port_id),
1666 pcie_port->port_id) == 1);
1669 /*************************** PCIe Configuration API ***************************/
1671 /** configure pcie port (link params, etc..) */
1673 al_pcie_port_config(struct al_pcie_port *pcie_port,
1674 const struct al_pcie_port_config_params *params)
1676 struct al_pcie_regs *regs = pcie_port->regs;
1677 enum al_pcie_operating_mode op_mode;
1681 if (!al_pcie_port_is_enabled(pcie_port)) {
1682 al_err("PCIe %d: port not enabled, cannot configure port\n",
1683 pcie_port->port_id);
1687 if (al_pcie_is_link_started(pcie_port)) {
1688 al_err("PCIe %d: link already started, cannot configure port\n",
1689 pcie_port->port_id);
1693 al_assert(pcie_port);
1696 al_dbg("PCIe %d: port config\n", pcie_port->port_id);
1698 op_mode = al_pcie_operating_mode_get(pcie_port);
1700 /* if max lanes not specifies, read it from register */
1701 if (pcie_port->max_lanes == 0) {
1702 uint32_t global_conf = al_reg_read32(regs->axi.pcie_global.conf);
1703 uint32_t act_lanes = AL_REG_FIELD_GET(global_conf,
1704 (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1705 PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
1706 PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
1707 PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT);
1711 pcie_port->max_lanes = 1;
1714 pcie_port->max_lanes = 2;
1717 pcie_port->max_lanes = 4;
1720 pcie_port->max_lanes = 8;
1723 pcie_port->max_lanes = 0;
1724 al_err("PCIe %d: invalid max lanes val (0x%x)\n", pcie_port->port_id, act_lanes);
1729 if (params->link_params)
1730 status = al_pcie_port_link_config(pcie_port, params->link_params);
1734 /* Change max read request size to 256 bytes
1735 * Max Payload Size is remained untouched- it is the responsibility of
1736 * the host to change the MPS, if needed.
1738 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1739 al_reg_write32_masked(regs->core_space[i].pcie_dev_ctrl_status,
1740 PCIE_PORT_DEV_CTRL_STATUS_MRRS_MASK,
1741 PCIE_PORT_DEV_CTRL_STATUS_MRRS_VAL_256);
1742 if (pcie_port->rev_id != AL_PCIE_REV_ID_3)
1746 if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1747 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1749 /* Disable TPH next pointer */
1750 for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1751 al_reg_write32_masked(regs->core_space[i].tph_cap_base,
1752 PCIE_TPH_NEXT_POINTER, 0);
1755 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1759 status = al_pcie_port_snoop_config(pcie_port, params->enable_axi_snoop);
1763 al_pcie_port_max_num_of_pfs_set(pcie_port, pcie_port->max_num_of_pfs);
1765 al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
1767 al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
1769 al_pcie_port_relaxed_pcie_ordering_config(pcie_port, params->relaxed_ordering_params);
1771 if (params->lat_rply_timers)
1772 status = al_pcie_port_lat_rply_timers_config(pcie_port, params->lat_rply_timers);
1776 if (params->gen2_params)
1777 status = al_pcie_port_gen2_params_config(pcie_port, params->gen2_params);
1781 if (params->gen3_params)
1782 status = al_pcie_port_gen3_params_config(pcie_port, params->gen3_params);
1786 if (params->sris_params)
1787 status = al_pcie_port_sris_config(pcie_port, params->sris_params,
1788 params->link_params->max_speed);
1792 al_pcie_port_ib_hcrd_config(pcie_port);
1794 if (params->fast_link_mode) {
1795 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
1796 1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT,
1797 1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT);
1800 if (params->enable_axi_slave_err_resp)
1801 al_reg_write32_masked(®s->port_regs->axi_slave_err_resp,
1802 1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT,
1803 1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT);
1806 * Addressing RMN: 5477
1809 * address-decoder logic performs sub-target decoding even for transactions
1810 * which undergo target enforcement. thus, in case transaction's address is
1811 * inside any ECAM bar, the sub-target decoding will be set to ECAM, which
1812 * causes wrong handling by PCIe unit
1815 * on EP mode only, turning on the iATU-enable bit (with the relevant mask
1816 * below) allows the PCIe unit to discard the ECAM bit which was asserted
1817 * by-mistake in the address-decoder
1819 if (op_mode == AL_PCIE_OPERATING_MODE_EP) {
1820 al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1821 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1822 (0) << PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
1823 al_reg_write32_masked(regs->axi.ob_ctrl.cfg_control,
1824 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN,
1825 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN);
1828 if (op_mode == AL_PCIE_OPERATING_MODE_RC) {
1830 * enable memory and I/O access from port when in RC mode
1831 * in RC mode, only core_space[0] is valid.
1833 al_reg_write16_masked(
1834 (uint16_t __iomem *)(®s->core_space[0].config_header[0] + (0x4 >> 2)),
1835 0x7, /* Mem, MSE, IO */
1838 /* change the class code to match pci bridge */
1839 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1841 al_reg_write32_masked(
1842 (uint32_t __iomem *)(®s->core_space[0].config_header[0]
1843 + (PCI_CLASS_REVISION >> 2)),
1847 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1850 * Addressing RMN: 5702
1853 * target bus mask default value in HW is: 0xFE, this enforces
1854 * setting the target bus for ports 1 and 3 when running on RC
1855 * mode since bit[20] in ECAM address in these cases is set
1858 * on RC mode only, set target-bus value to 0xFF to prevent this
1861 al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1862 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1863 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK);
1866 al_dbg("PCIe %d: port config %s\n", pcie_port->port_id, status? "failed": "done");
/*
 * Configure a PCIe physical function (PF) with the given parameters.
 * NOTE(review): the opening signature line ("int al_pcie_pf_config(") and
 * several body lines (error-path returns, closing brace) are missing from
 * this chunk; do not assume the visible lines are contiguous.
 */
1873 struct al_pcie_pf *pcie_pf,
1874 const struct al_pcie_pf_config_params *params)
1876 struct al_pcie_port *pcie_port;
1882 pcie_port = pcie_pf->pcie_port;
/* PF configuration is only legal on an enabled port */
1884 if (!al_pcie_port_is_enabled(pcie_port)) {
1885 al_err("PCIe %d: port not enabled, cannot configure port\n", pcie_port->port_id);
1889 al_dbg("PCIe %d: pf %d config\n", pcie_port->port_id, pcie_pf->pf_num);
1892 status = al_pcie_port_pf_params_config(pcie_pf, params);
1897 al_dbg("PCIe %d: pf %d config %s\n",
1898 pcie_port->port_id, pcie_pf->pf_num, status ? "failed" : "done");
1903 /************************** PCIe Link Operations API **************************/
1905 /* start pcie link */
/*
 * Start link training by setting the application LTSSM-enable bit in the
 * wrapper's port_init register. Requires the port to be enabled first.
 * NOTE(review): return type line, braces and return statements are missing
 * from this chunk (sampled out).
 */
1907 al_pcie_link_start(struct al_pcie_port *pcie_port)
1909 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1911 if (!al_pcie_port_is_enabled(pcie_port)) {
1912 al_err("PCIe %d: port not enabled, cannot start link\n",
1913 pcie_port->port_id);
1917 al_dbg("PCIe_%d: start port link.\n", pcie_port->port_id);
/* assert app_ltssm_en: releases the LTSSM so link training can begin */
1919 al_reg_write32_masked(
1920 regs->app.global_ctrl.port_init,
1921 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1922 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1927 /* stop pcie link */
/*
 * Stop the link by clearing the application LTSSM-enable bit.
 * Stopping a link that was never started only emits a warning (visible
 * error-handling lines were sampled out of this chunk).
 */
1929 al_pcie_link_stop(struct al_pcie_port *pcie_port)
1931 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1933 if (!al_pcie_is_link_started(pcie_port)) {
1934 al_warn("PCIe %d: trying to stop a non-started link\n",
1935 pcie_port->port_id);
1938 al_dbg("PCIe_%d: stop port link.\n", pcie_port->port_id);
/* masked write with ~mask as the value clears the ltssm_en field */
1940 al_reg_write32_masked(
1941 regs->app.global_ctrl.port_init,
1942 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1943 ~PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1948 /** return AL_TRUE is link started (LTSSM enabled) and AL_FALSE otherwise */
1949 al_bool al_pcie_is_link_started(struct al_pcie_port *pcie_port)
1951 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
/* read back the LTSSM-enable field from the wrapper's port_init register;
 * the final "return ltssm_en;" style line is missing from this chunk */
1953 uint32_t port_init = al_reg_read32(regs->app.global_ctrl.port_init);
1954 uint8_t ltssm_en = AL_REG_FIELD_GET(port_init,
1955 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1956 PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_SHIFT);
1961 /* wait for link up indication */
/*
 * Poll al_pcie_check_link() until link-up or the timeout expires, delaying
 * AL_PCIE_LINKUP_WAIT_INTERVAL between attempts.
 * NOTE(review): wait_count = timeout_ms * INTERVALS_PER_SEC has a
 * ms-vs-seconds unit mismatch unless a "/ 1000" was dropped from this
 * chunk -- confirm against the full source.
 */
1963 al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
1965 int wait_count = timeout_ms * AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC;
1967 while (wait_count-- > 0) {
1968 if (al_pcie_check_link(pcie_port, NULL)) {
1969 al_dbg("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
1972 al_dbg("PCIe_%d: No link up, %d attempts remaining\n",
1973 pcie_port->port_id, wait_count);
1975 al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
1977 al_dbg("PCIE_%d: link is not established in time\n",
1978 pcie_port->port_id);
1983 /** get link status */
/*
 * Fill *status with link-up state, LTSSM state, negotiated speed and lane
 * count. Speed is decoded from the Link Status register (LNKSTA) current
 * link speed field; lanes from the negotiated link width field.
 * NOTE(review): early-return and "break" lines inside the switch were
 * sampled out of this chunk.
 */
1985 al_pcie_link_status(struct al_pcie_port *pcie_port,
1986 struct al_pcie_link_status *status)
1988 struct al_pcie_regs *regs = pcie_port->regs;
1989 uint16_t pcie_lnksta;
/* a disabled port reports link-down with default speed */
1993 if (!al_pcie_port_is_enabled(pcie_port)) {
1994 al_dbg("PCIe %d: port not enabled, no link.\n", pcie_port->port_id);
1995 status->link_up = AL_FALSE;
1996 status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
1998 status->ltssm_state = 0;
2002 status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state);
2004 if (!status->link_up) {
2005 status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
/* 16-bit read of LNKSTA: pcie_cap_base is cast to a 16-bit pointer, so
 * the byte offset is divided by 2 */
2010 pcie_lnksta = al_reg_read16((uint16_t __iomem *)regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKSTA >> 1));
2012 switch(pcie_lnksta & AL_PCI_EXP_LNKSTA_CLS) {
2013 case AL_PCI_EXP_LNKSTA_CLS_2_5GB:
2014 status->speed = AL_PCIE_LINK_SPEED_GEN1;
2016 case AL_PCI_EXP_LNKSTA_CLS_5_0GB:
2017 status->speed = AL_PCIE_LINK_SPEED_GEN2;
2019 case AL_PCI_EXP_LNKSTA_CLS_8_0GB:
2020 status->speed = AL_PCIE_LINK_SPEED_GEN3;
2023 status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
2024 al_err("PCIe %d: unknown link speed indication. PCIE LINK STATUS %x\n",
2025 pcie_port->port_id, pcie_lnksta);
2027 status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
2028 al_dbg("PCIe %d: Link up. speed gen%d negotiated width %d\n",
2029 pcie_port->port_id, status->speed, status->lanes);
2034 /** get lane status */
/*
 * Read per-lane status (reset indication and requested speed) from the AXI
 * status registers. The lane index is range-checked against the maximum
 * lane count of the port's revision.
 */
2036 al_pcie_lane_status_get(
2037 struct al_pcie_port *pcie_port,
2039 struct al_pcie_lane_status *status)
2041 struct al_pcie_regs *regs = pcie_port->regs;
2042 uint32_t lane_status;
2045 al_assert(pcie_port);
2047 al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_1) || (lane < REV1_2_MAX_NUM_LANES));
2048 al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_2) || (lane < REV1_2_MAX_NUM_LANES));
2049 al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_3) || (lane < REV3_MAX_NUM_LANES));
2051 reg_ptr = regs->axi.status.lane[lane];
2053 /* Reset field is valid only when same value is read twice */
/* do/while: re-read until two consecutive reads agree on the reset bit */
2055 lane_status = al_reg_read32(reg_ptr);
2056 status->is_reset = !!(lane_status & PCIE_AXI_STATUS_LANE_IS_RESET);
2057 } while (status->is_reset != (!!(al_reg_read32(reg_ptr) & PCIE_AXI_STATUS_LANE_IS_RESET)));
2059 status->requested_speed =
2060 (lane_status & PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_MASK) >>
2061 PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_SHIFT;
2064 /** trigger hot reset */
/*
 * Assert or de-assert PCIe hot-reset by toggling APP_RST_INIT in the
 * events_gen register of function 0. Valid only in RC mode with a started
 * link; asserting/de-asserting an already-matching state is an error.
 */
2066 al_pcie_link_hot_reset(struct al_pcie_port *pcie_port, al_bool enable)
2068 struct al_pcie_regs *regs = pcie_port->regs;
2069 uint32_t events_gen;
2070 al_bool app_reset_state;
2071 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2073 if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2074 al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
2078 if (!al_pcie_is_link_started(pcie_port)) {
2079 al_err("PCIe %d: link not started, cannot trigger hot-reset\n", pcie_port->port_id);
/* read current reset state to reject redundant transitions */
2083 events_gen = al_reg_read32(regs->app.global_ctrl.events_gen[0]);
2084 app_reset_state = events_gen & PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT;
2086 if (enable && app_reset_state) {
2087 al_err("PCIe %d: link is already in hot-reset state\n", pcie_port->port_id);
2089 } else if ((!enable) && (!(app_reset_state))) {
2090 al_err("PCIe %d: link is already in non-hot-reset state\n", pcie_port->port_id);
2093 al_dbg("PCIe %d: %s hot-reset\n", pcie_port->port_id,
2094 (enable ? "enabling" : "disabling"));
2095 /* hot-reset functionality is implemented only for function 0 */
2096 al_reg_write32_masked(regs->app.global_ctrl.events_gen[0],
2097 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT,
2098 (enable ? PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT
2099 : ~PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT));
2104 /** disable port link */
/*
 * Set or clear the Link Disable bit in the Link Control register (RC mode
 * only, link must be started). Redundant transitions are rejected.
 * NOTE(review): the error message below says "hot-reset" -- looks like a
 * copy-paste from al_pcie_link_hot_reset; it should mention link-disable.
 * NOTE(review): LNKCTL is accessed via a 32-bit read at offset
 * (AL_PCI_EXP_LNKCTL >> 1) without a 16-bit pointer cast (contrast the
 * LNKSTA access in al_pcie_link_status) -- verify the intended pointer
 * arithmetic against the register layout.
 */
2106 al_pcie_link_disable(struct al_pcie_port *pcie_port, al_bool disable)
2108 struct al_pcie_regs *regs = pcie_port->regs;
2109 uint32_t pcie_lnkctl;
2110 al_bool link_disable_state;
2111 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2113 if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2114 al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
2118 if (!al_pcie_is_link_started(pcie_port)) {
2119 al_err("PCIe %d: link not started, cannot disable link\n", pcie_port->port_id);
2123 pcie_lnkctl = al_reg_read32(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1));
2124 link_disable_state = pcie_lnkctl & AL_PCI_EXP_LNKCTL_LNK_DIS;
2126 if (disable && link_disable_state) {
2127 al_err("PCIe %d: link is already in disable state\n", pcie_port->port_id);
2129 } else if ((!disable) && (!(link_disable_state))) {
2130 al_err("PCIe %d: link is already in enable state\n", pcie_port->port_id);
2134 al_dbg("PCIe %d: %s port\n", pcie_port->port_id, (disable ? "disabling" : "enabling"));
2135 al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2136 AL_PCI_EXP_LNKCTL_LNK_DIS,
2137 (disable ? AL_PCI_EXP_LNKCTL_LNK_DIS : ~AL_PCI_EXP_LNKCTL_LNK_DIS));
/*
 * Trigger link retraining by setting the Retrain Link bit in the Link
 * Control register. RC mode only; the link must already be started.
 */
2143 al_pcie_link_retrain(struct al_pcie_port *pcie_port)
2145 struct al_pcie_regs *regs = pcie_port->regs;
2146 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2148 if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2149 al_err("PCIe %d: link-retrain is applicable only for RC mode\n",
2150 pcie_port->port_id);
2154 if (!al_pcie_is_link_started(pcie_port)) {
2155 al_err("PCIe %d: link not started, cannot link-retrain\n", pcie_port->port_id);
2159 al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2160 AL_PCI_EXP_LNKCTL_LNK_RTRN, AL_PCI_EXP_LNKCTL_LNK_RTRN);
2165 /* trigger speed change */
/*
 * Program the target link speed, then request a direct speed change via
 * the port's GEN2 control register.
 * NOTE(review): "®s" below is mojibake for "&regs" (HTML-entity
 * corruption); the file needs re-encoding before it will compile.
 */
2167 al_pcie_link_change_speed(struct al_pcie_port *pcie_port,
2168 enum al_pcie_link_speed new_speed)
2170 struct al_pcie_regs *regs = pcie_port->regs;
2172 if (!al_pcie_is_link_started(pcie_port)) {
2173 al_err("PCIe %d: link not started, cannot change speed\n", pcie_port->port_id);
2177 al_dbg("PCIe %d: changing speed to %d\n", pcie_port->port_id, new_speed);
2179 al_pcie_port_link_speed_ctrl_set(pcie_port, new_speed);
2181 al_reg_write32_masked(®s->port_regs->gen2_ctrl,
2182 PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE,
2183 PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE);
2188 /* TODO: check if this function needed */
/* Unimplemented stub: link width change is not supported; only logs an
 * error. The width parameter is deliberately unused. */
2190 al_pcie_link_change_width(struct al_pcie_port *pcie_port,
2191 uint8_t width __attribute__((__unused__)))
2193 al_err("PCIe %d: link change width not implemented\n",
2194 pcie_port->port_id);
2199 /**************************** Post Link Start API *****************************/
2201 /************************** Snoop Configuration API ***************************/
/*
 * Configure AXI master snoop mode for both read (ARCTL) and write (AWCTL)
 * channels. In both cases the snoop-override bit is set; the snoop bit
 * itself is set only when snooping is enabled.
 */
2204 al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_snoop)
2206 struct al_pcie_regs *regs = pcie_port->regs;
2208 /* Set snoop mode */
2209 al_dbg("PCIE_%d: snoop mode %s\n",
2210 pcie_port->port_id, enable_axi_snoop ? "enable" : "disable");
2212 if (enable_axi_snoop) {
2213 al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2214 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2215 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP);
2217 al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2218 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2219 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP);
/* disable path: override forced, snoop bit cleared */
2221 al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2222 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2223 PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP);
2225 al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2226 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2227 PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP);
2232 /************************** Configuration Space API ***************************/
2234 /** get base address of pci configuration space header */
/* Returns (via *addr) a byte pointer to the PF's config-space header. */
2236 al_pcie_config_space_get(struct al_pcie_pf *pcie_pf,
2237 uint8_t __iomem **addr)
2239 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2241 *addr = (uint8_t __iomem *)®s->core_space[pcie_pf->pf_num].config_header[0];
2245 /* Read data from the local configuration space */
/*
 * Read one 32-bit word from this PF's local config space.
 * @param reg_offset offset in 32-bit words (array index), not bytes.
 */
2247 al_pcie_local_cfg_space_read(
2248 struct al_pcie_pf *pcie_pf,
2249 unsigned int reg_offset)
2251 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2254 data = al_reg_read32(®s->core_space[pcie_pf->pf_num].config_header[reg_offset]);
2259 /* Write data to the local configuration space */
/*
 * Write one 32-bit word to this PF's local config space.
 * When allow_ro_wr is set, read-only fields are made writable around the
 * access (wr_to_ro_set TRUE/FALSE bracket). A cs2 flag selects a DBI CS2
 * write instead of a plain write.
 * NOTE(review): the "data" and "cs2" parameter lines are missing from this
 * chunk (sampled out).
 */
2261 al_pcie_local_cfg_space_write(
2262 struct al_pcie_pf *pcie_pf,
2263 unsigned int reg_offset,
2266 al_bool allow_ro_wr)
2268 struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
2269 struct al_pcie_regs *regs = pcie_port->regs;
2270 unsigned int pf_num = pcie_pf->pf_num;
2271 uint32_t *offset = ®s->core_space[pf_num].config_header[reg_offset];
2274 al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
2276 if (cs2 == AL_FALSE)
2277 al_reg_write32(offset, data);
2279 al_reg_write32_dbi_cs2(pcie_port, offset, data);
2282 al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
2285 /** set target_bus and mask_target_bus */
/*
 * Read-modify-write of the outbound cfg_target_bus register: updates both
 * the bus-number mask field and the bus-number field.
 * NOTE(review): the "target_bus" parameter line is missing from this chunk.
 */
2287 al_pcie_target_bus_set(
2288 struct al_pcie_port *pcie_port,
2290 uint8_t mask_target_bus)
2292 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2295 reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2296 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2297 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT,
2299 AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2300 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT,
2302 al_reg_write32(regs->axi.ob_ctrl.cfg_target_bus, reg);
2306 /** get target_bus and mask_target_bus */
/* Decode the mask and bus-number fields of cfg_target_bus into the two
 * output parameters. Both output pointers must be non-NULL. */
2308 al_pcie_target_bus_get(
2309 struct al_pcie_port *pcie_port,
2310 uint8_t *target_bus,
2311 uint8_t *mask_target_bus)
2313 struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2316 al_assert(target_bus);
2317 al_assert(mask_target_bus);
2319 reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2321 *mask_target_bus = AL_REG_FIELD_GET(reg,
2322 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2323 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
2324 *target_bus = AL_REG_FIELD_GET(reg,
2325 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2326 PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT);
2330 /** Set secondary bus number */
/* Program the secondary-bus field of the outbound cfg_control register.
 * The value line of the masked write was sampled out of this chunk. */
2332 al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus)
2334 struct al_pcie_regs *regs = pcie_port->regs;
2336 uint32_t secbus_val = (secbus <<
2337 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT);
2339 al_reg_write32_masked(
2340 regs->axi.ob_ctrl.cfg_control,
2341 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK,
2346 /** Set sub-ordinary bus number */
/* Program the subordinate-bus field of the outbound cfg_control register.
 * The value line of the masked write was sampled out of this chunk. */
2348 al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port, uint8_t subbus)
2350 struct al_pcie_regs *regs = pcie_port->regs;
2352 uint32_t subbus_val = (subbus <<
2353 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT);
2355 al_reg_write32_masked(
2356 regs->axi.ob_ctrl.cfg_control,
2357 PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK,
2362 /* Enable/disable deferring incoming configuration requests */
/*
 * Enable/disable CRS (configuration retry) for incoming config requests.
 * The bit position differs between rev 3 and rev 1/2 register layouts, so
 * the mask is chosen by rev_id. The "al_bool en" parameter line is missing
 * from this chunk.
 */
2364 al_pcie_app_req_retry_set(
2365 struct al_pcie_port *pcie_port,
2368 struct al_pcie_regs *regs = pcie_port->regs;
2369 uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
2370 PCIE_W_REV3_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
2371 PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
2373 al_reg_write32_masked(regs->app.global_ctrl.pm_control,
2374 mask, (en == AL_TRUE) ? mask : 0);
2377 /* Check if deferring incoming configuration requests is enabled or not */
/* Returns AL_TRUE when the revision-specific app-req-retry enable bit is
 * set in pm_control. */
2378 al_bool al_pcie_app_req_retry_get_status(struct al_pcie_port *pcie_port)
2380 struct al_pcie_regs *regs = pcie_port->regs;
2381 uint32_t pm_control;
2382 uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
2383 PCIE_W_REV3_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
2384 PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
2386 pm_control = al_reg_read32(regs->app.global_ctrl.pm_control);
2387 return (pm_control & mask) ? AL_TRUE : AL_FALSE;
2390 /*************** Internal Address Translation Unit (ATU) API ******************/
2392 /** program internal ATU region entry */
/*
 * Program one internal ATU (iATU) region: select the region index and
 * direction, write base/target addresses and limit, apply the RMN:3186
 * cfg-shift workaround, then program CR1/CR2 (TLP type, attributes,
 * match-mode flags, enable bit).
 * NOTE(review): this chunk has many lines sampled out (comment openers,
 * some register-write argument lines) and "®s" is mojibake for "&regs";
 * the sequencing below is register-order sensitive -- do not reflow.
 */
2394 al_pcie_atu_region_set(
2395 struct al_pcie_port *pcie_port,
2396 struct al_pcie_atu_region *atu_region)
2398 struct al_pcie_regs *regs = pcie_port->regs;
2399 enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2403 * Addressing RMN: 5384
2406 * From SNPS (also included in the data book) Dynamic iATU Programming
2407 * With AHB/AXI Bridge Module When the bridge slave interface clock
2408 * (hresetn or slv_aclk) is asynchronous to the PCIe native core clock
2409 * (core_clk), you must not update the iATU registers while operations
2410 * are in progress on the AHB/AXI bridge slave interface. The iATU
2411 * registers are in the core_clk clock domain. The register outputs are
2412 * used in the AHB/AXI bridge slave interface clock domain. There is no
2413 * synchronization logic between these registers and the AHB/AXI bridge
2417 * Do not allow configuring Outbound iATU after link is started
2419 if ((atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)
2420 && (al_pcie_is_link_started(pcie_port))) {
2421 if (!atu_region->enforce_ob_atu_region_set) {
2422 al_err("PCIe %d: setting OB iATU after link is started is not allowed\n",
2423 pcie_port->port_id);
2424 al_assert(AL_FALSE);
2427 al_info("PCIe %d: setting OB iATU even after link is started\n",
2428 pcie_port->port_id);
/* select region index (bits 3:0) and direction (bit 31) */
2432 /*TODO : add sanity check */
2433 AL_REG_FIELD_SET(reg, 0xF, 0, atu_region->index);
2434 AL_REG_BIT_VAL_SET(reg, 31, atu_region->direction);
2435 al_reg_write32(®s->port_regs->iatu.index, reg);
2437 al_reg_write32(®s->port_regs->iatu.lower_base_addr,
2438 (uint32_t)(atu_region->base_addr & 0xFFFFFFFF));
2439 al_reg_write32(®s->port_regs->iatu.upper_base_addr,
2440 (uint32_t)((atu_region->base_addr >> 32)& 0xFFFFFFFF));
2441 al_reg_write32(®s->port_regs->iatu.lower_target_addr,
2442 (uint32_t)(atu_region->target_addr & 0xFFFFFFFF));
2443 al_reg_write32(®s->port_regs->iatu.upper_target_addr,
2444 (uint32_t)((atu_region->target_addr >> 32)& 0xFFFFFFFF));
2446 /* configure the limit, not needed when working in BAR match mode */
2447 if (atu_region->match_mode == 0) {
2448 uint32_t limit_reg_val;
/* bits [40:32] of the limit live in a shared even/odd extension register;
 * select the half belonging to this region index */
2449 uint32_t *limit_ext_reg =
2450 (atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
2451 ®s->app.atu.out_mask_pair[atu_region->index / 2] :
2452 ®s->app.atu.in_mask_pair[atu_region->index / 2];
2453 uint32_t limit_ext_reg_mask =
2454 (atu_region->index % 2) ?
2455 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
2456 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
2457 unsigned int limit_ext_reg_shift =
2458 (atu_region->index % 2) ?
2459 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
2460 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
2461 uint64_t limit_sz_msk =
2462 atu_region->limit - atu_region->base_addr;
2463 uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
2466 if (limit_ext_reg_val) {
2467 limit_reg_val = (uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
2468 al_assert(limit_reg_val == 0xFFFFFFFF);
2470 limit_reg_val = (uint32_t)(atu_region->limit &
2474 al_reg_write32_masked(
2477 limit_ext_reg_val << limit_ext_reg_shift);
2479 al_reg_write32(®s->port_regs->iatu.limit_addr,
2485 * Addressing RMN: 3186
2488 * Bug in SNPS IP (versions 4.21 , 4.10a-ea02)
2489 * In CFG request created via outbound atu (shift mode) bits [27:12] go to
2490 * [31:16] , the shifting is correct , however the ATU leaves bit [15:12]
2491 * to their original values, this is then transmited in the tlp .
2492 * Those bits are currently reserved ,bit might be non-resv. in future generations .
2496 * rev=REV1,REV2 set bit 15 in corresponding app_reg.atu.out_mask
2497 * rev>REV2 set corresponding bit is app_reg.atu.reg_out_mask
2499 if ((atu_region->cfg_shift_mode == AL_TRUE) &&
2500 (atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)) {
2501 if (pcie_port->rev_id > AL_PCIE_REV_ID_2) {
2502 al_reg_write32_masked(regs->app.atu.reg_out_mask,
2503 1 << (atu_region->index) ,
2504 1 << (atu_region->index));
2506 uint32_t *limit_ext_reg =
2507 (atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
2508 ®s->app.atu.out_mask_pair[atu_region->index / 2] :
2509 ®s->app.atu.in_mask_pair[atu_region->index / 2];
2510 uint32_t limit_ext_reg_mask =
2511 (atu_region->index % 2) ?
2512 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
2513 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
2514 unsigned int limit_ext_reg_shift =
2515 (atu_region->index % 2) ?
2516 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
2517 PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
2519 al_reg_write32_masked(
2522 (AL_BIT(15)) << limit_ext_reg_shift);
/* CR1: TLP type (bits 4:0) and attr (bits 10:9) */
2527 AL_REG_FIELD_SET(reg, 0x1F, 0, atu_region->tlp_type);
2528 AL_REG_FIELD_SET(reg, 0x3 << 9, 9, atu_region->attr);
2531 if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
2532 && (op_mode == AL_PCIE_OPERATING_MODE_EP)
2533 && (atu_region->function_match_bypass_mode)) {
2534 AL_REG_FIELD_SET(reg,
2535 PCIE_IATU_CR1_FUNC_NUM_MASK,
2536 PCIE_IATU_CR1_FUNC_NUM_SHIFT,
2537 atu_region->function_match_bypass_mode_number);
2540 al_reg_write32(®s->port_regs->iatu.cr1, reg);
2542 /* Enable/disable the region. */
2544 AL_REG_FIELD_SET(reg, 0xFF, 0, atu_region->msg_code);
2545 AL_REG_FIELD_SET(reg, 0x700, 8, atu_region->bar_number);
2546 AL_REG_FIELD_SET(reg, 0x3 << 24, 24, atu_region->response);
2547 AL_REG_BIT_VAL_SET(reg, 16, atu_region->enable_attr_match_mode == AL_TRUE);
2548 AL_REG_BIT_VAL_SET(reg, 21, atu_region->enable_msg_match_mode == AL_TRUE);
2549 AL_REG_BIT_VAL_SET(reg, 28, atu_region->cfg_shift_mode == AL_TRUE);
2550 AL_REG_BIT_VAL_SET(reg, 29, atu_region->invert_matching == AL_TRUE);
2551 if (atu_region->tlp_type == AL_PCIE_TLP_TYPE_MEM || atu_region->tlp_type == AL_PCIE_TLP_TYPE_IO)
2552 AL_REG_BIT_VAL_SET(reg, 30, !!atu_region->match_mode);
2553 AL_REG_BIT_VAL_SET(reg, 31, !!atu_region->enable)
2555 /* In outbound, enable function bypass
2556 * In inbound, enable function match mode
2557 * Note: this is the same bit, has different meanings in ob/ib ATUs
2559 if (op_mode == AL_PCIE_OPERATING_MODE_EP)
2560 AL_REG_FIELD_SET(reg,
2561 PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_MASK,
2562 PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_SHIFT,
2563 atu_region->function_match_bypass_mode ? 0x1 : 0x0);
2565 al_reg_write32(®s->port_regs->iatu.cr2, reg);
2570 /** obtains internal ATU region base/target addresses */
/*
 * Read back an ATU region's enable flag and 64-bit base/target addresses
 * after selecting the region by index and direction.
 * NOTE(review): high dwords are OR'ed into *base_addr / *target_addr
 * without a visible "<< 32" shift -- lines may have been dropped from this
 * chunk; verify against the full source before relying on the values.
 */
2572 al_pcie_atu_region_get_fields(
2573 struct al_pcie_port *pcie_port,
2574 enum al_pcie_atu_dir direction, uint8_t index,
2575 al_bool *enable, uint64_t *base_addr, uint64_t *target_addr)
2577 struct al_pcie_regs *regs = pcie_port->regs;
2581 AL_REG_FIELD_SET(reg, 0xF, 0, index);
2582 AL_REG_BIT_VAL_SET(reg, 31, direction);
2583 al_reg_write32(®s->port_regs->iatu.index, reg);
2585 *base_addr = al_reg_read32(®s->port_regs->iatu.lower_base_addr);
2586 high_addr = al_reg_read32(®s->port_regs->iatu.upper_base_addr);
2588 *base_addr |= high_addr;
2590 *target_addr = al_reg_read32(®s->port_regs->iatu.lower_target_addr);
2591 high_addr = al_reg_read32(®s->port_regs->iatu.upper_target_addr);
2593 *target_addr |= high_addr;
2595 reg = al_reg_read32(®s->port_regs->iatu.cr1);
2596 *enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;
/*
 * Program the outbound AXI I/O window: 64-bit start and end (limit)
 * addresses split into high/low registers, then enable the I/O BAR in the
 * slave control register.
 */
2600 al_pcie_axi_io_config(
2601 struct al_pcie_port *pcie_port,
2602 al_phys_addr_t start,
2605 struct al_pcie_regs *regs = pcie_port->regs;
2607 al_reg_write32(regs->axi.ob_ctrl.io_start_h,
2608 (uint32_t)((start >> 32) & 0xFFFFFFFF));
2610 al_reg_write32(regs->axi.ob_ctrl.io_start_l,
2611 (uint32_t)(start & 0xFFFFFFFF));
2613 al_reg_write32(regs->axi.ob_ctrl.io_limit_h,
2614 (uint32_t)((end >> 32) & 0xFFFFFFFF));
2616 al_reg_write32(regs->axi.ob_ctrl.io_limit_l,
2617 (uint32_t)(end & 0xFFFFFFFF));
2619 al_reg_write32_masked(regs->axi.ctrl.slv_ctl,
2620 PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN,
2621 PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN);
2624 /************** Interrupt and Event generation (Endpoint mode Only) API *****************/
/*
 * Signal Function-Level-Reset completion for the PF: pulse the FLR-done
 * bit in the PF's events_gen register (set, delay, clear).
 */
2626 int al_pcie_pf_flr_done_gen(struct al_pcie_pf *pcie_pf)
2628 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2629 unsigned int pf_num = pcie_pf->pf_num;
2631 al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
2632 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE,
2633 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE);
/* hold the pulse for the required interval before de-asserting */
2634 al_udelay(AL_PCIE_FLR_DONE_INTERVAL);
2635 al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
2636 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE, 0);
2641 /** generate INTx Assert/DeAssert Message */
/*
 * Drive the legacy-interrupt (INTA) assert bit (bit 3) in the PF's
 * events_gen register. Only INTA is supported; the "assert" parameter line
 * is missing from this chunk.
 */
2643 al_pcie_legacy_int_gen(
2644 struct al_pcie_pf *pcie_pf,
2646 enum al_pcie_legacy_int_type type)
2648 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2649 unsigned int pf_num = pcie_pf->pf_num;
2652 al_assert(type == AL_PCIE_LEGACY_INTA); /* only INTA supported */
2653 reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2654 AL_REG_BIT_VAL_SET(reg, 3, !!assert);
2655 al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2660 /** generate MSI interrupt */
/*
 * Generate an MSI: write the vector number with the MSI-request bit (4)
 * cleared, then set the request bit in a second write so the hardware sees
 * a clean 0->1 edge.
 */
2662 al_pcie_msi_int_gen(struct al_pcie_pf *pcie_pf, uint8_t vector)
2664 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2665 unsigned int pf_num = pcie_pf->pf_num;
2668 /* set msi vector and clear MSI request */
2669 reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2670 AL_REG_BIT_CLEAR(reg, 4);
2671 AL_REG_FIELD_SET(reg,
2672 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK,
2673 PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT,
2675 al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2676 /* set MSI request */
2677 AL_REG_BIT_SET(reg, 4);
2678 al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2683 /** configure MSIX capability */
/*
 * Program the PF's MSI-X capability: table size (N-1 encoding), table
 * offset/BAR and PBA offset/BAR. Read-only capability fields are made
 * writable for the duration (wr_to_ro_set bracket).
 */
2685 al_pcie_msix_config(
2686 struct al_pcie_pf *pcie_pf,
2687 struct al_pcie_msix_params *msix_params)
2689 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2690 unsigned int pf_num = pcie_pf->pf_num;
2693 al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_TRUE);
2695 msix_reg0 = al_reg_read32(regs->core_space[pf_num].msix_cap_base);
/* table size field is encoded as (number of entries - 1) */
2697 msix_reg0 &= ~(AL_PCI_MSIX_MSGCTRL_TBL_SIZE << AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT);
2698 msix_reg0 |= ((msix_params->table_size - 1) & AL_PCI_MSIX_MSGCTRL_TBL_SIZE) <<
2699 AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT;
2700 al_reg_write32(regs->core_space[pf_num].msix_cap_base, msix_reg0);
2702 /* Table offset & BAR */
2703 al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_TABLE >> 2),
2704 (msix_params->table_offset & AL_PCI_MSIX_TABLE_OFFSET) |
2705 (msix_params->table_bar & AL_PCI_MSIX_TABLE_BAR));
2706 /* PBA offset & BAR */
2707 al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_PBA >> 2),
2708 (msix_params->pba_offset & AL_PCI_MSIX_PBA_OFFSET) |
2709 (msix_params->pba_bar & AL_PCI_MSIX_PBA_BAR));
2711 al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_FALSE);
2716 /** check whether MSIX is enabled */
/* Tests the MSI-X Enable bit in the capability's message-control word;
 * return lines are missing from this chunk. */
2718 al_pcie_msix_enabled(struct al_pcie_pf *pcie_pf)
2720 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2721 uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2723 if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_EN)
2728 /** check whether MSIX is masked */
/* Tests the MSI-X Function Mask bit in the capability's message-control
 * word; return lines are missing from this chunk. */
2730 al_pcie_msix_masked(struct al_pcie_pf *pcie_pf)
2732 struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2733 uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2735 if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_MASK)
2740 /******************** Advanced Error Reporting (AER) API **********************/
2741 /************************* Auxiliary functions ********************************/
2742 /* configure AER capability */
/*
 * Shared AER-capability setup for one PF (used by both EP and RC
 * wrappers): validates the AER capability header (ID and version), then
 * programs error masks, severities, ECRC generation/checking, and the
 * device-control error-reporting enables. Error-path return lines were
 * sampled out of this chunk.
 */
2744 al_pcie_aer_config_aux(
2745 struct al_pcie_port *pcie_port,
2746 unsigned int pf_num,
2747 struct al_pcie_aer_params *params)
2749 struct al_pcie_regs *regs = pcie_port->regs;
2750 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2753 reg_val = al_reg_read32(&aer_regs->header);
/* sanity: capability ID and version must match the AER extended cap */
2755 if (((reg_val & PCIE_AER_CAP_ID_MASK) >> PCIE_AER_CAP_ID_SHIFT) !=
2756 PCIE_AER_CAP_ID_VAL)
2759 if (((reg_val & PCIE_AER_CAP_VER_MASK) >> PCIE_AER_CAP_VER_SHIFT) !=
2760 PCIE_AER_CAP_VER_VAL)
/* mask registers are inverted enables: bit set == error masked */
2763 al_reg_write32(&aer_regs->corr_err_mask, ~params->enabled_corr_err);
2765 al_reg_write32(&aer_regs->uncorr_err_mask,
2766 (~params->enabled_uncorr_non_fatal_err) |
2767 (~params->enabled_uncorr_fatal_err));
2769 al_reg_write32(&aer_regs->uncorr_err_severity,
2770 params->enabled_uncorr_fatal_err);
2772 al_reg_write32(&aer_regs->cap_and_ctrl,
2773 (params->ecrc_gen_en ? PCIE_AER_CTRL_STAT_ECRC_GEN_EN : 0) |
2774 (params->ecrc_chk_en ? PCIE_AER_CTRL_STAT_ECRC_CHK_EN : 0));
2777 * Addressing RMN: 5119
2780 * ECRC generation for outbound request translated by iATU is effected
2781 * by iATU setting instead of ecrc_gen_bit in AER
2784 * When enabling ECRC generation, set the outbound iATU to generate ECRC
2786 if (params->ecrc_gen_en == AL_TRUE) {
2787 al_pcie_ecrc_gen_ob_atu_enable(pcie_port, pf_num);
2790 al_reg_write32_masked(
2791 regs->core_space[pf_num].pcie_dev_ctrl_status,
2792 PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN |
2793 PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN |
2794 PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN |
2795 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN,
2796 (params->enabled_corr_err ?
2797 PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN : 0) |
2798 (params->enabled_uncorr_non_fatal_err ?
2799 PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN : 0) |
2800 (params->enabled_uncorr_fatal_err ?
2801 PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN : 0) |
2802 ((params->enabled_uncorr_non_fatal_err &
2803 AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2804 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0) |
2805 ((params->enabled_uncorr_fatal_err &
2806 AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2807 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0));
2812 /** AER uncorrectable errors get and clear */
/* Read the uncorrectable-error status, then write the value back to clear
 * it (RW1C semantics); the return line is missing from this chunk. */
2814 al_pcie_aer_uncorr_get_and_clear_aux(
2815 struct al_pcie_port *pcie_port,
2816 unsigned int pf_num)
2818 struct al_pcie_regs *regs = pcie_port->regs;
2819 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2822 reg_val = al_reg_read32(&aer_regs->uncorr_err_stat);
2823 al_reg_write32(&aer_regs->uncorr_err_stat, reg_val);
2828 /** AER correctable errors get and clear */
/* Read the correctable-error status, then write the value back to clear
 * it (RW1C semantics); the return line is missing from this chunk. */
2830 al_pcie_aer_corr_get_and_clear_aux(
2831 struct al_pcie_port *pcie_port,
2832 unsigned int pf_num)
2834 struct al_pcie_regs *regs = pcie_port->regs;
2835 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2838 reg_val = al_reg_read32(&aer_regs->corr_err_stat);
2839 al_reg_write32(&aer_regs->corr_err_stat, reg_val);
/* compile-time guard: the copy loop below assumes the AER header log is
 * exactly 4 dwords */
2844 #if (AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS != 4)
2845 #error Wrong assumption!
2848 /** AER get the header for the TLP corresponding to a detected error */
/* Copy the 4-dword AER header log of the erroneous TLP into hdr[]. */
2850 al_pcie_aer_err_tlp_hdr_get_aux(
2851 struct al_pcie_port *pcie_port,
2852 unsigned int pf_num,
2853 uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2855 struct al_pcie_regs *regs = pcie_port->regs;
2856 struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2859 for (i = 0; i < AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS; i++)
2860 hdr[i] = al_reg_read32(&aer_regs->header_log[i]);
2863 /******************** EP AER functions **********************/
2864 /** configure EP physical function AER capability */
/* EP wrapper: delegate AER configuration to the aux helper using this
 * PF's port and function number. */
2865 int al_pcie_aer_config(
2866 struct al_pcie_pf *pcie_pf,
2867 struct al_pcie_aer_params *params)
2872 return al_pcie_aer_config_aux(
2873 pcie_pf->pcie_port, pcie_pf->pf_num, params);
2876 /** EP physical function AER uncorrectable errors get and clear */
2877 unsigned int al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf)
2881 return al_pcie_aer_uncorr_get_and_clear_aux(
2882 pcie_pf->pcie_port, pcie_pf->pf_num);
2885 /** EP physical function AER correctable errors get and clear */
2886 unsigned int al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
2890 return al_pcie_aer_corr_get_and_clear_aux(
2891 pcie_pf->pcie_port, pcie_pf->pf_num);
2895 * EP physical function AER get the header for
2896 * the TLP corresponding to a detected error
2898 void al_pcie_aer_err_tlp_hdr_get(
2899 struct al_pcie_pf *pcie_pf,
2900 uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2905 al_pcie_aer_err_tlp_hdr_get_aux(
2906 pcie_pf->pcie_port, pcie_pf->pf_num, hdr);
/******************** RC AER functions **********************/
/** configure RC port AER capability */
int al_pcie_port_aer_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_aer_params *params)
{
	al_assert(pcie_port);
	al_assert(params);

	/*
	 * For RC mode there's no PFs (neither PF handles),
	 * therefore PF#0 is used
	 */
	return al_pcie_aer_config_aux(pcie_port, 0, params);
}
/** RC port AER uncorrectable errors get and clear */
unsigned int al_pcie_port_aer_uncorr_get_and_clear(
	struct al_pcie_port *pcie_port)
{
	al_assert(pcie_port);

	/*
	 * For RC mode there's no PFs (neither PF handles),
	 * therefore PF#0 is used
	 */
	return al_pcie_aer_uncorr_get_and_clear_aux(pcie_port, 0);
}
/** RC port AER correctable errors get and clear */
unsigned int al_pcie_port_aer_corr_get_and_clear(
	struct al_pcie_port *pcie_port)
{
	al_assert(pcie_port);

	/*
	 * For RC mode there's no PFs (neither PF handles),
	 * therefore PF#0 is used
	 */
	return al_pcie_aer_corr_get_and_clear_aux(pcie_port, 0);
}
2951 /** RC port AER get the header for the TLP corresponding to a detected error */
2952 void al_pcie_port_aer_err_tlp_hdr_get(
2953 struct al_pcie_port *pcie_port,
2954 uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2956 al_assert(pcie_port);
2960 * For RC mode there's no PFs (neither PF handles),
2961 * therefore PF#0 is used
2963 al_pcie_aer_err_tlp_hdr_get_aux(pcie_port, 0, hdr);
2966 /********************** Loopback mode (RC and Endpoint modes) ************/
2968 /** enter local pipe loopback mode */
2970 al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port)
2972 struct al_pcie_regs *regs = pcie_port->regs;
2974 al_dbg("PCIe %d: Enter LOCAL PIPE Loopback mode", pcie_port->port_id);
2976 al_reg_write32_masked(®s->port_regs->pipe_loopback_ctrl,
2977 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2978 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
2980 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
2981 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
2982 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT);
2988 * @brief exit local pipe loopback mode
2990 * @param pcie_port pcie port handle
2991 * @return 0 if no error found
2994 al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port)
2996 struct al_pcie_regs *regs = pcie_port->regs;
2998 al_dbg("PCIe %d: Exit LOCAL PIPE Loopback mode", pcie_port->port_id);
3000 al_reg_write32_masked(®s->port_regs->pipe_loopback_ctrl,
3001 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
3004 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
3005 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
3010 /** enter remote loopback mode */
3012 al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port)
3014 struct al_pcie_regs *regs = pcie_port->regs;
3016 al_dbg("PCIe %d: Enter REMOTE Loopback mode", pcie_port->port_id);
3018 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
3019 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
3020 1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
3026 * @brief exit remote loopback mode
3028 * @param pcie_port pcie port handle
3029 * @return 0 if no error found
3032 al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port)
3034 struct al_pcie_regs *regs = pcie_port->regs;
3036 al_dbg("PCIe %d: Exit REMOTE Loopback mode", pcie_port->port_id);
3038 al_reg_write32_masked(®s->port_regs->port_link_ctrl,
3039 1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,