1 /***********************license start***************
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
35 * For any questions regarding licensing please contact marketing@caviumnetworks.com
37 ***********************license end**************************************/
47 * Interface to PCIe as a host(RC) or target(EP)
49 * <hr>$Revision: 41586 $<hr>
52 #include "cvmx-csr-db.h"
53 #include "cvmx-pcie.h"
54 #include "cvmx-sysinfo.h"
55 #include "cvmx-swap.h"
57 #include "cvmx-helper-errata.h"
61 * Return the Core virtual base address for PCIe IO access. IOs are
62 * read/written as an offset from this address.
64 * @param pcie_port PCIe port the IO is for
66 * @return 64bit Octeon IO base address for read/write
68 uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
70 cvmx_pcie_address_t pcie_addr;
72 pcie_addr.io.upper = 0;
75 pcie_addr.io.subdid = 2;
77 pcie_addr.io.port = pcie_port;
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the IO window
 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Each port decodes a fixed 4GB IO window */
    return 1ull << 32;
}
97 * Return the Core virtual base address for PCIe MEM access. Memory is
98 * read/written as an offset from this address.
100 * @param pcie_port PCIe port the IO is for
102 * @return 64bit Octeon IO base address for read/write
104 uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
106 cvmx_pcie_address_t pcie_addr;
108 pcie_addr.mem.upper = 0;
109 pcie_addr.mem.io = 1;
110 pcie_addr.mem.did = 3;
111 pcie_addr.mem.subdid = 3 + pcie_port;
112 return pcie_addr.u64;
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the Mem window
 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Each port's mem window decodes 36 bits of PCIe address space */
    return 1ull << 36;
}
132 * Initialize the RC config space CSRs
134 * @param pcie_port PCIe port to initialize
136 static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
138 /* Max Payload Size (PCIE*_CFG030[MPS]) */
139 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
140 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
141 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
143 cvmx_pciercx_cfg030_t pciercx_cfg030;
144 pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
145 pciercx_cfg030.s.mps = 0; /* Max payload size = 128 bytes for best Octeon DMA performance */
146 pciercx_cfg030.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
147 pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
148 pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
149 pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
150 pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
151 pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
152 pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
153 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
156 /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
157 /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
159 cvmx_npei_ctl_status2_t npei_ctl_status2;
160 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
161 npei_ctl_status2.s.mps = 0; /* Max payload size = 128 bytes for best Octeon DMA performance */
162 npei_ctl_status2.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
163 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
166 /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
168 cvmx_pciercx_cfg070_t pciercx_cfg070;
169 pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
170 pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
171 pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
172 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
175 /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
176 /* ME and MSAE should always be set. */
177 /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
178 /* System Error Message Enable (PCIE*_CFG001[SEE]) */
180 cvmx_pciercx_cfg001_t pciercx_cfg001;
181 pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
182 pciercx_cfg001.s.msae = 1; /* Memory space enable. */
183 pciercx_cfg001.s.me = 1; /* Bus master enable. */
184 pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
185 pciercx_cfg001.s.see = 1; /* SERR# enable */
186 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
190 /* Advanced Error Recovery Message Enables */
191 /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
192 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
193 /* Use CVMX_PCIERCX_CFG067 hardware default */
194 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
197 /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
199 cvmx_pciercx_cfg032_t pciercx_cfg032;
200 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
201 pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
202 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
205 /* Entrance Latencies (PCIE*_CFG451[L0EL,L1EL]) */
206 // FIXME: Anything needed here?
208 /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
209 /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
211 /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
212 cvmx_pciercx_cfg006_t pciercx_cfg006;
213 pciercx_cfg006.u32 = 0;
214 pciercx_cfg006.s.pbnum = 1;
215 pciercx_cfg006.s.sbnum = 1;
216 pciercx_cfg006.s.subbnum = 1;
217 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
220 /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
221 /* Most applications should disable the memory-mapped I/O BAR by */
222 /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
224 cvmx_pciercx_cfg008_t pciercx_cfg008;
225 pciercx_cfg008.u32 = 0;
226 pciercx_cfg008.s.mb_addr = 0x100;
227 pciercx_cfg008.s.ml_addr = 0;
228 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
231 /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
232 /* Most applications should disable the prefetchable BAR by setting */
233 /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
234 /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
236 cvmx_pciercx_cfg009_t pciercx_cfg009;
237 cvmx_pciercx_cfg010_t pciercx_cfg010;
238 cvmx_pciercx_cfg011_t pciercx_cfg011;
239 pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
240 pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
241 pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
242 pciercx_cfg009.s.lmem_base = 0x100;
243 pciercx_cfg009.s.lmem_limit = 0;
244 pciercx_cfg010.s.umem_base = 0x100;
245 pciercx_cfg011.s.umem_limit = 0;
246 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
247 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
248 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
251 /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
252 /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
254 cvmx_pciercx_cfg035_t pciercx_cfg035;
255 pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
256 pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
257 pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
258 pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
259 pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
260 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
263 /* Advanced Error Recovery Interrupt Enables */
264 /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
266 cvmx_pciercx_cfg075_t pciercx_cfg075;
267 pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
268 pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
269 pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
270 pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
271 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
274 /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
275 /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
277 cvmx_pciercx_cfg034_t pciercx_cfg034;
278 pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
279 pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
280 pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
281 pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
282 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
289 * Initialize a host mode PCIe link. This function takes a PCIe
290 * port from reset to a link up state. Software can then begin
291 * configuring the rest of the link.
293 * @param pcie_port PCIe port to initialize
295 * @return Zero on success
297 static int __cvmx_pcie_rc_initialize_link(int pcie_port)
299 uint64_t start_cycle;
300 cvmx_pescx_ctl_status_t pescx_ctl_status;
301 cvmx_pciercx_cfg452_t pciercx_cfg452;
302 cvmx_pciercx_cfg032_t pciercx_cfg032;
303 cvmx_pciercx_cfg448_t pciercx_cfg448;
305 /* Set the lane width */
306 pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
307 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
308 if (pescx_ctl_status.s.qlm_cfg == 0)
310 /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
311 pciercx_cfg452.s.lme = 0xf;
315 /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
316 pciercx_cfg452.s.lme = 0x7;
318 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);
320 /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
321 cause bus errors on 64bit memory reads. Turning off length error
322 checking fixes this */
323 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
325 cvmx_pciercx_cfg455_t pciercx_cfg455;
326 pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
327 pciercx_cfg455.s.m_cpl_len_err = 1;
328 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
331 /* Lane swap needs to be manually enabled for CN52XX */
332 if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
334 pescx_ctl_status.s.lane_swp = 1;
335 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
338 /* Bring up the link */
339 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
340 pescx_ctl_status.s.lnk_enb = 1;
341 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
343 /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
344 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
345 __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
347 /* Wait for the link to come up */
348 start_cycle = cvmx_get_cycle();
351 if (cvmx_get_cycle() - start_cycle > 2*cvmx_sysinfo_get()->cpu_clock_hz)
353 cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
357 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
358 } while (pciercx_cfg032.s.dlla == 0);
360 /* Update the Replay Time Limit. Empirically, some PCIe devices take a
361 little longer to respond than expected under load. As a workaround for
362 this we configure the Replay Time Limit to the value expected for a 512
363 byte MPS instead of our actual 256 byte MPS. The numbers below are
364 directly from the PCIe spec table 3-4 */
365 pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
366 switch (pciercx_cfg032.s.nlw)
369 pciercx_cfg448.s.rtl = 1677;
371 case 2: /* 2 lanes */
372 pciercx_cfg448.s.rtl = 867;
374 case 4: /* 4 lanes */
375 pciercx_cfg448.s.rtl = 462;
377 case 8: /* 8 lanes */
378 pciercx_cfg448.s.rtl = 258;
381 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
388 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
390 * @param pcie_port PCIe port to initialize
392 * @return Zero on success
394 int cvmx_pcie_rc_initialize(int pcie_port)
397 cvmx_ciu_soft_prst_t ciu_soft_prst;
398 cvmx_pescx_bist_status_t pescx_bist_status;
399 cvmx_pescx_bist_status2_t pescx_bist_status2;
400 cvmx_npei_ctl_status_t npei_ctl_status;
401 cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
402 cvmx_npei_mem_access_subidx_t mem_access_subid;
403 cvmx_npei_dbg_data_t npei_dbg_data;
404 cvmx_pescx_ctl_status2_t pescx_ctl_status2;
405 cvmx_pciercx_cfg032_t pciercx_cfg032;
408 /* Make sure we aren't trying to setup a target mode interface in host mode */
409 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
410 if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
412 cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port0, but port0 is not in host mode\n");
416 /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
417 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
419 npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
420 if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
422 cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
427 /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
428 npei_ctl_status.s.arb = 1;
429 /* Allow up to 0x20 config retries */
430 npei_ctl_status.s.cfg_rtry = 0x20;
431 /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
432 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
434 npei_ctl_status.s.p0_ntags = 0x20;
435 npei_ctl_status.s.p1_ntags = 0x20;
437 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
439 /* Bring the PCIe out of reset */
440 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
442 /* The EBH5200 board swapped the PCIe reset lines on the board. As a
443 workaround for this bug, we bring both PCIe ports out of reset at
444 the same time instead of on separate calls. So for port 0, we bring
445 both out of reset and do nothing on port 1 */
448 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
449 /* After a chip reset the PCIe will also be in reset. If it isn't,
450 most likely someone is trying to init it again without a proper
452 if (ciu_soft_prst.s.soft_prst == 0)
454 /* Reset the ports */
455 ciu_soft_prst.s.soft_prst = 1;
456 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
457 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
458 ciu_soft_prst.s.soft_prst = 1;
459 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
460 /* Wait until pcie resets the ports. */
461 cvmx_wait_usec(2000);
463 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
464 ciu_soft_prst.s.soft_prst = 0;
465 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
466 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
467 ciu_soft_prst.s.soft_prst = 0;
468 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
473 /* The normal case: The PCIe ports are completely separate and can be
474 brought out of reset independently */
476 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
478 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
479 /* After a chip reset the PCIe will also be in reset. If it isn't,
480 most likely someone is trying to init it again without a proper
482 if (ciu_soft_prst.s.soft_prst == 0)
485 ciu_soft_prst.s.soft_prst = 1;
487 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
489 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
490 /* Wait until pcie resets the ports. */
491 cvmx_wait_usec(2000);
495 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
496 ciu_soft_prst.s.soft_prst = 0;
497 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
501 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
502 ciu_soft_prst.s.soft_prst = 0;
503 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
507 /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
508 PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
511 /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
512 CN52XX, so we only probe it on newer chips */
513 if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
515 /* Clear PCLK_RUN so we can check if the clock is running */
516 pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
517 pescx_ctl_status2.s.pclk_run = 1;
518 cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
519 /* Now that we cleared PCLK_RUN, wait for it to be set again telling
520 us the clock is running */
521 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
522 cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
524 cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
529 /* Check and make sure PCIe came out of reset. If it doesn't the board
530 probably hasn't wired the clocks up and the interface should be
532 pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
533 if (pescx_ctl_status2.s.pcierst)
535 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
539 /* Check BIST2 status. If any bits are set skip this interface. This
540 is an attempt to catch PCIE-813 on pass 1 parts */
541 pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
542 if (pescx_bist_status2.u64)
544 cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
548 /* Check BIST status */
549 pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
550 if (pescx_bist_status.u64)
551 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
553 /* Initialize the config space CSRs */
554 __cvmx_pcie_rc_initialize_config_space(pcie_port);
556 /* Bring the link up */
557 if (__cvmx_pcie_rc_initialize_link(pcie_port))
559 cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize_link() failed\n");
563 /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
564 npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
565 npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
566 npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
567 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
569 /* Setup Mem access SubDIDs */
570 mem_access_subid.u64 = 0;
571 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
572 mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
573 mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
574 mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
575 mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
576 mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
577 mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
578 mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
579 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
581 /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
582 for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
584 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
585 mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
588 /* Disable the peer to peer forwarding register. This must be setup
589 by the OS after it enumerates the bus and assigns addresses to the
593 cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
594 cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
597 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
598 cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
600 /* Disable Octeon's BAR1. It isn't needed in RC mode since BAR2
601 maps all of memory. BAR2 also maps 256MB-512MB into the 2nd
603 cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), -1);
605 /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
606 where they overlap. It also overlaps with the device addresses, so
607 make sure the peer to peer forwarding is set right */
608 cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
610 /* Setup BAR2 attributes */
611 /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
612 /* Â PTLP_RO,CTLP_RO should normally be set (except for debug). */
613 /* Â WAIT_COM=0 will likely work for all applications. */
614 /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
617 cvmx_npei_ctl_port1_t npei_ctl_port;
618 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
619 npei_ctl_port.s.bar2_enb = 1;
620 npei_ctl_port.s.bar2_esx = 1;
621 npei_ctl_port.s.bar2_cax = 0;
622 npei_ctl_port.s.ptlp_ro = 1;
623 npei_ctl_port.s.ctlp_ro = 1;
624 npei_ctl_port.s.wait_com = 0;
625 npei_ctl_port.s.waitl_com = 0;
626 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
630 cvmx_npei_ctl_port0_t npei_ctl_port;
631 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
632 npei_ctl_port.s.bar2_enb = 1;
633 npei_ctl_port.s.bar2_esx = 1;
634 npei_ctl_port.s.bar2_cax = 0;
635 npei_ctl_port.s.ptlp_ro = 1;
636 npei_ctl_port.s.ctlp_ro = 1;
637 npei_ctl_port.s.wait_com = 0;
638 npei_ctl_port.s.waitl_com = 0;
639 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
642 /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
643 TLP ordering to not be preserved after multiple PCIe port resets. This
644 code detects this fault and corrects it by aligning the TLP counters
645 properly. Another link reset is then performed. See PCIE-13340 */
646 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
647 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
649 cvmx_npei_dbg_data_t dbg_data;
650 int old_in_fif_p_count;
653 int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
656 /* Choose a write address of 1MB. It should be harmless as all bars
657 haven't been setup */
658 uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
660 /* Make sure at least in_p_offset have been executed before we try and
661 read in_fif_p_count */
665 cvmx_write64_uint32(write_address, 0);
669 /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
670 unstable sometimes so read it twice with a write between the reads.
671 This way we can tell the value is good as it will increment by one
673 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
674 cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
677 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
678 old_in_fif_p_count = dbg_data.s.data & 0xff;
679 cvmx_write64_uint32(write_address, 0);
681 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
682 in_fif_p_count = dbg_data.s.data & 0xff;
683 } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
685 /* Update in_fif_p_count for it's offset with respect to out_p_count */
686 in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
688 /* Read the OUT_P_COUNT from the debug select */
689 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
690 cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
691 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
692 out_p_count = (dbg_data.s.data>>1) & 0xff;
694 /* Check that the two counters are aligned */
695 if (out_p_count != in_fif_p_count)
697 cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
698 while (in_fif_p_count != 0)
700 cvmx_write64_uint32(write_address, 0);
702 in_fif_p_count = (in_fif_p_count + 1) & 0xff;
704 /* The EBH5200 board swapped the PCIe reset lines on the board. This
705 means we must bring both links down and up, which will cause the
706 PCIe0 to need alignment again. Lots of messages will be displayed,
707 but everything should work */
708 if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
710 cvmx_pcie_rc_initialize(0);
711 /* Rety bringing this port up */
716 /* Display the link status */
717 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
718 cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
725 * Shutdown a PCIe port and put it in reset
727 * @param pcie_port PCIe port to shutdown
729 * @return Zero on success
731 int cvmx_pcie_rc_shutdown(int pcie_port)
733 /* Wait for all pending operations to complete */
734 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
735 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
740 cvmx_ciu_soft_prst_t ciu_soft_prst;
741 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
742 ciu_soft_prst.s.soft_prst = 1;
743 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
747 cvmx_ciu_soft_prst_t ciu_soft_prst;
748 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
749 ciu_soft_prst.s.soft_prst = 1;
750 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
758 * Build a PCIe config space request address for a device
760 * @param pcie_port PCIe port to access
762 * @param dev Device ID
763 * @param fn Device sub function
764 * @param reg Register to access
766 * @return 64bit Octeon IO address
768 static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
770 cvmx_pcie_address_t pcie_addr;
771 cvmx_pciercx_cfg006_t pciercx_cfg006;
773 pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
774 if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
778 pcie_addr.config.upper = 2;
779 pcie_addr.config.io = 1;
780 pcie_addr.config.did = 3;
781 pcie_addr.config.subdid = 1;
782 pcie_addr.config.es = 1;
783 pcie_addr.config.port = pcie_port;
784 pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
785 pcie_addr.config.bus = bus;
786 pcie_addr.config.dev = dev;
787 pcie_addr.config.func = fn;
788 pcie_addr.config.reg = reg;
789 return pcie_addr.u64;
/**
 * Read 8bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (all ones if the device doesn't exist)
 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (address)
        return cvmx_read64_uint8(address);
    else
        return 0xff;
}
/**
 * Read 16bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (all ones if the device doesn't exist)
 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (address)
        return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
    else
        return 0xffff;
}
/**
 * Read 32bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (all ones if the device doesn't exist)
 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    if (address)
        return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
    else
        return 0xffffffff;
}
/**
 * Write 8bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop the write if the device doesn't exist */
    if (address)
        cvmx_write64_uint8(address, val);
}
/**
 * Write 16bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop the write if the device doesn't exist */
    if (address)
        cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}
/**
 * Write 32bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop the write if the device doesn't exist */
    if (address)
        cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
911 * Read a PCIe config space register indirectly. This is used for
912 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
914 * @param pcie_port PCIe port to read from
915 * @param cfg_offset Address to read
919 uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
921 cvmx_pescx_cfg_rd_t pescx_cfg_rd;
922 pescx_cfg_rd.u64 = 0;
923 pescx_cfg_rd.s.addr = cfg_offset;
924 cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
925 pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
926 return pescx_cfg_rd.s.data;
931 * Write a PCIe config space register indirectly. This is used for
932 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
934 * @param pcie_port PCIe port to write to
935 * @param cfg_offset Address to write
936 * @param val Value to write
938 void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
940 cvmx_pescx_cfg_wr_t pescx_cfg_wr;
941 pescx_cfg_wr.u64 = 0;
942 pescx_cfg_wr.s.addr = cfg_offset;
943 pescx_cfg_wr.s.data = val;
944 cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
949 * Initialize a PCIe port for use in target(EP) mode.
951 * @return Zero on success
953 int cvmx_pcie_ep_initialize(void)
956 cvmx_npei_ctl_status_t npei_ctl_status;
958 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
959 if (npei_ctl_status.s.host_mode)
962 /* Enable bus master and memory */
963 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEP_CFG001, 0x6);
965 /* Max Payload Size (PCIE*_CFG030[MPS]) */
966 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
967 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
968 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
970 cvmx_pciercx_cfg030_t pciercx_cfg030;
971 pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
972 pciercx_cfg030.s.mps = 0; /* Max payload size = 128 bytes (Limit of most PCs) */
973 pciercx_cfg030.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
974 pciercx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
975 pciercx_cfg030.s.ns_en = 1; /* Enable no snoop. */
976 pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
977 pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
978 pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
979 pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
980 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
983 /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
984 /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
986 cvmx_npei_ctl_status2_t npei_ctl_status2;
987 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
988 npei_ctl_status2.s.mps = 0; /* Max payload size = 128 bytes (Limit of most PCs) */
989 npei_ctl_status2.s.mrrs = 0; /* Max read request size = 128 bytes for best Octeon DMA performance */
990 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
993 /* Setup Mem access SubDID 12 to access Host memory */
995 cvmx_npei_mem_access_subidx_t mem_access_subid;
996 mem_access_subid.u64 = 0;
997 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
998 mem_access_subid.s.nmerge = 1; /* Merging is allowed in this window. */
999 mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
1000 mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
1001 mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
1002 mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
1003 mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
1004 mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
1005 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
1006 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
1013 * Wait for posted PCIe read/writes to reach the other side of
1014 * the internal PCIe switch. This will insure that core
1015 * read/writes are posted before anything after this function
1016 * is called. This may be necessary when writing to memory that
1017 * will later be read using the DMA/PKT engines.
1019 * @param pcie_port PCIe port to wait for
1021 void cvmx_pcie_wait_for_pending(int pcie_port)
1023 cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1028 /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1029 description of how this code works */
1030 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1033 if (!npei_data_out_cnt.s.p1_fcnt)
1035 a = npei_data_out_cnt.s.p1_ucnt;
1036 b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1040 if (!npei_data_out_cnt.s.p0_fcnt)
1042 a = npei_data_out_cnt.s.p0_ucnt;
1043 b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1048 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1049 c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;