1 /***********************license start***************
2 * Copyright (c) 2003-2011 Cavium, Inc. <support@cavium.com>. All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
/**
 * @file
 *
 * Interface to PCIe as a host(RC) or target(EP)
 *
 * <hr>$Revision: 70030 $<hr>
 */
47 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
48 #include <asm/octeon/cvmx.h>
49 #include <asm/octeon/cvmx-config.h>
50 #include <asm/octeon/cvmx-clock.h>
51 #include <asm/octeon/cvmx-ciu-defs.h>
52 #include <asm/octeon/cvmx-dpi-defs.h>
53 #include <asm/octeon/cvmx-mio-defs.h>
54 #include <asm/octeon/cvmx-npi-defs.h>
55 #include <asm/octeon/cvmx-npei-defs.h>
56 #include <asm/octeon/cvmx-pci-defs.h>
57 #include <asm/octeon/cvmx-pcieepx-defs.h>
58 #include <asm/octeon/cvmx-pciercx-defs.h>
59 #include <asm/octeon/cvmx-pemx-defs.h>
60 #include <asm/octeon/cvmx-pexp-defs.h>
61 #include <asm/octeon/cvmx-pescx-defs.h>
62 #include <asm/octeon/cvmx-sli-defs.h>
63 #include <asm/octeon/cvmx-sriox-defs.h>
64 #include <asm/octeon/cvmx-helper-jtag.h>
66 #ifdef CONFIG_CAVIUM_DECODE_RSL
67 #include <asm/octeon/cvmx-error.h>
69 #include <asm/octeon/cvmx-helper.h>
70 #include <asm/octeon/cvmx-helper-board.h>
71 #include <asm/octeon/cvmx-helper-errata.h>
72 #include <asm/octeon/cvmx-qlm.h>
73 #include <asm/octeon/cvmx-pcie.h>
74 #include <asm/octeon/cvmx-sysinfo.h>
75 #include <asm/octeon/cvmx-swap.h>
76 #include <asm/octeon/cvmx-wqe.h>
79 #if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
80 #include "cvmx-csr-db.h"
82 #include "cvmx-pcie.h"
83 #include "cvmx-sysinfo.h"
84 #include "cvmx-swap.h"
86 #if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
87 #include "cvmx-error.h"
89 #include "cvmx-helper-errata.h"
/* Max Read Request Size / Max Payload Size encodings programmed into the
   PCIe config space. The encoded value n selects 128 << n bytes. 128 bytes
   is used for MPS because it is the limit of most PC root complexes. */
#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
#define MPS_CN5XXX 0  /* 128 byte Max Packet Size (Limit of most PCs) */
#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
#define MPS_CN6XXX 0  /* 128 byte Max Packet Size (Limit of most PCs) */
99 * Return the Core virtual base address for PCIe IO access. IOs are
100 * read/written as an offset from this address.
102 * @param pcie_port PCIe port the IO is for
104 * @return 64bit Octeon IO base address for read/write
106 uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
108 cvmx_pcie_address_t pcie_addr;
110 pcie_addr.io.upper = 0;
112 pcie_addr.io.did = 3;
113 pcie_addr.io.subdid = 2;
115 pcie_addr.io.port = pcie_port;
116 return pcie_addr.u64;
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the IO window
 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Each port decodes a fixed 4GB I/O window */
    return 1ull << 32;
}
135 * Return the Core virtual base address for PCIe MEM access. Memory is
136 * read/written as an offset from this address.
138 * @param pcie_port PCIe port the IO is for
140 * @return 64bit Octeon IO base address for read/write
142 uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
144 cvmx_pcie_address_t pcie_addr;
146 pcie_addr.mem.upper = 0;
147 pcie_addr.mem.io = 1;
148 pcie_addr.mem.did = 3;
149 pcie_addr.mem.subdid = 3 + pcie_port;
150 return pcie_addr.u64;
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the Mem window
 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Each port decodes a fixed 64GB memory window */
    return 1ull << 36;
}
170 * Initialize the RC config space CSRs
172 * @param pcie_port PCIe port to initialize
174 static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
176 /* Max Payload Size (PCIE*_CFG030[MPS]) */
177 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
178 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
179 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
181 cvmx_pciercx_cfg030_t pciercx_cfg030;
182 pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
183 if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
185 pciercx_cfg030.s.mps = MPS_CN5XXX;
186 pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
190 pciercx_cfg030.s.mps = MPS_CN6XXX;
191 pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
193 pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
194 pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
195 pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
196 pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
197 pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
198 pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
199 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
202 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
204 /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
205 /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
206 cvmx_npei_ctl_status2_t npei_ctl_status2;
207 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
208 npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
209 npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
211 npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
213 npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */
215 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
219 /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
220 /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
221 cvmx_dpi_sli_prtx_cfg_t prt_cfg;
222 cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
223 prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
224 prt_cfg.s.mps = MPS_CN6XXX;
225 prt_cfg.s.mrrs = MRRS_CN6XXX;
226 /* Max outstanding load request. */
228 cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
230 sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
231 sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
232 cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
235 /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
237 cvmx_pciercx_cfg070_t pciercx_cfg070;
238 pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
239 pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
240 pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
241 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
244 /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
245 /* ME and MSAE should always be set. */
246 /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
247 /* System Error Message Enable (PCIE*_CFG001[SEE]) */
249 cvmx_pciercx_cfg001_t pciercx_cfg001;
250 pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
251 pciercx_cfg001.s.msae = 1; /* Memory space enable. */
252 pciercx_cfg001.s.me = 1; /* Bus master enable. */
253 pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
254 pciercx_cfg001.s.see = 1; /* SERR# enable */
255 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
259 /* Advanced Error Recovery Message Enables */
260 /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
261 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
262 /* Use CVMX_PCIERCX_CFG067 hardware default */
263 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
266 /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
268 cvmx_pciercx_cfg032_t pciercx_cfg032;
269 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
270 pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
271 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
274 /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
275 /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
277 /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
278 cvmx_pciercx_cfg006_t pciercx_cfg006;
279 pciercx_cfg006.u32 = 0;
280 pciercx_cfg006.s.pbnum = 1;
281 pciercx_cfg006.s.sbnum = 1;
282 pciercx_cfg006.s.subbnum = 1;
283 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
286 /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
287 /* Most applications should disable the memory-mapped I/O BAR by */
288 /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
290 cvmx_pciercx_cfg008_t pciercx_cfg008;
291 pciercx_cfg008.u32 = 0;
292 pciercx_cfg008.s.mb_addr = 0x100;
293 pciercx_cfg008.s.ml_addr = 0;
294 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
297 /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
298 /* Most applications should disable the prefetchable BAR by setting */
299 /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
300 /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
302 cvmx_pciercx_cfg009_t pciercx_cfg009;
303 cvmx_pciercx_cfg010_t pciercx_cfg010;
304 cvmx_pciercx_cfg011_t pciercx_cfg011;
305 pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
306 pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
307 pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
308 pciercx_cfg009.s.lmem_base = 0x100;
309 pciercx_cfg009.s.lmem_limit = 0;
310 pciercx_cfg010.s.umem_base = 0x100;
311 pciercx_cfg011.s.umem_limit = 0;
312 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
313 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
314 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
317 /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
318 /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
320 cvmx_pciercx_cfg035_t pciercx_cfg035;
321 pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
322 pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
323 pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
324 pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
325 pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
326 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
329 /* Advanced Error Recovery Interrupt Enables */
330 /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
332 cvmx_pciercx_cfg075_t pciercx_cfg075;
333 pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
334 pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
335 pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
336 pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
337 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
340 /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
341 /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
343 cvmx_pciercx_cfg034_t pciercx_cfg034;
344 pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
345 pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
346 pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
347 pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
348 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
354 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
355 * port from reset to a link up state. Software can then begin
356 * configuring the rest of the link.
358 * @param pcie_port PCIe port to initialize
360 * @return Zero on success
362 static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
364 uint64_t start_cycle;
365 cvmx_pescx_ctl_status_t pescx_ctl_status;
366 cvmx_pciercx_cfg452_t pciercx_cfg452;
367 cvmx_pciercx_cfg032_t pciercx_cfg032;
368 cvmx_pciercx_cfg448_t pciercx_cfg448;
370 /* Set the lane width */
371 pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
372 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
373 if (pescx_ctl_status.s.qlm_cfg == 0)
375 /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
376 pciercx_cfg452.s.lme = 0xf;
380 /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
381 pciercx_cfg452.s.lme = 0x7;
383 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);
385 /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
386 cause bus errors on 64bit memory reads. Turning off length error
387 checking fixes this */
388 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
390 cvmx_pciercx_cfg455_t pciercx_cfg455;
391 pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
392 pciercx_cfg455.s.m_cpl_len_err = 1;
393 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
396 /* Lane swap needs to be manually enabled for CN52XX */
397 if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
399 switch (cvmx_sysinfo_get()->board_type)
401 #if defined(OCTEON_VENDOR_LANNER)
402 case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
406 pescx_ctl_status.s.lane_swp = 1;
409 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
412 /* Bring up the link */
413 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
414 pescx_ctl_status.s.lnk_enb = 1;
415 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
417 /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
418 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
419 __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
421 /* Wait for the link to come up */
422 start_cycle = cvmx_get_cycle();
425 if (cvmx_get_cycle() - start_cycle > 100*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
427 cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
431 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
432 } while (pciercx_cfg032.s.dlla == 0);
434 /* Clear all pending errors */
435 cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));
437 /* Update the Replay Time Limit. Empirically, some PCIe devices take a
438 little longer to respond than expected under load. As a workaround for
439 this we configure the Replay Time Limit to the value expected for a 512
440 byte MPS instead of our actual 256 byte MPS. The numbers below are
441 directly from the PCIe spec table 3-4 */
442 pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
443 switch (pciercx_cfg032.s.nlw)
446 pciercx_cfg448.s.rtl = 1677;
448 case 2: /* 2 lanes */
449 pciercx_cfg448.s.rtl = 867;
451 case 4: /* 4 lanes */
452 pciercx_cfg448.s.rtl = 462;
454 case 8: /* 8 lanes */
455 pciercx_cfg448.s.rtl = 258;
458 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
463 static inline void __cvmx_increment_ba(cvmx_sli_mem_access_subidx_t *pmas)
465 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
472 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
475 * @param pcie_port PCIe port to initialize
477 * @return Zero on success
479 static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
483 uint64_t addr_swizzle;
484 cvmx_ciu_soft_prst_t ciu_soft_prst;
485 cvmx_pescx_bist_status_t pescx_bist_status;
486 cvmx_pescx_bist_status2_t pescx_bist_status2;
487 cvmx_npei_ctl_status_t npei_ctl_status;
488 cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
489 cvmx_npei_mem_access_subidx_t mem_access_subid;
490 cvmx_npei_dbg_data_t npei_dbg_data;
491 cvmx_pescx_ctl_status2_t pescx_ctl_status2;
492 cvmx_pciercx_cfg032_t pciercx_cfg032;
493 cvmx_npei_bar1_indexx_t bar1_index;
496 /* Make sure we aren't trying to setup a target mode interface in host mode */
497 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
498 if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
500 cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
504 /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
505 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
507 npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
508 if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
510 cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
515 /* Make sure a CN56XX pass 1 isn't trying to do anything; errata for PASS 1 */
516 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) {
517 cvmx_dprintf ("PCIe port %d: CN56XX_PASS_1, skipping\n", pcie_port);
521 /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
522 npei_ctl_status.s.arb = 1;
523 /* Allow up to 0x20 config retries */
524 npei_ctl_status.s.cfg_rtry = 0x20;
525 /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
526 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
528 npei_ctl_status.s.p0_ntags = 0x20;
529 npei_ctl_status.s.p1_ntags = 0x20;
531 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
533 /* Bring the PCIe out of reset */
534 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
536 /* The EBH5200 board swapped the PCIe reset lines on the board. As a
537 workaround for this bug, we bring both PCIe ports out of reset at
538 the same time instead of on separate calls. So for port 0, we bring
539 both out of reset and do nothing on port 1 */
542 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
543 /* After a chip reset the PCIe will also be in reset. If it isn't,
544 most likely someone is trying to init it again without a proper
546 if (ciu_soft_prst.s.soft_prst == 0)
548 /* Reset the ports */
549 ciu_soft_prst.s.soft_prst = 1;
550 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
551 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
552 ciu_soft_prst.s.soft_prst = 1;
553 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
554 /* Wait until pcie resets the ports. */
555 cvmx_wait_usec(2000);
557 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
558 ciu_soft_prst.s.soft_prst = 0;
559 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
560 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
561 ciu_soft_prst.s.soft_prst = 0;
562 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
567 /* The normal case: The PCIe ports are completely separate and can be
568 brought out of reset independently */
570 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
572 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
573 /* After a chip reset the PCIe will also be in reset. If it isn't,
574 most likely someone is trying to init it again without a proper
576 if (ciu_soft_prst.s.soft_prst == 0)
579 ciu_soft_prst.s.soft_prst = 1;
581 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
583 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
584 /* Wait until pcie resets the ports. */
585 cvmx_wait_usec(2000);
589 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
590 ciu_soft_prst.s.soft_prst = 0;
591 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
595 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
596 ciu_soft_prst.s.soft_prst = 0;
597 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
601 /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
602 PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
605 /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
606 CN52XX, so we only probe it on newer chips */
607 if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
609 /* Clear PCLK_RUN so we can check if the clock is running */
610 pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
611 pescx_ctl_status2.s.pclk_run = 1;
612 cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
613 /* Now that we cleared PCLK_RUN, wait for it to be set again telling
614 us the clock is running */
615 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
616 cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
618 cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
623 /* Check and make sure PCIe came out of reset. If it doesn't the board
624 probably hasn't wired the clocks up and the interface should be
626 pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
627 if (pescx_ctl_status2.s.pcierst)
629 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
633 /* Check BIST2 status. If any bits are set skip this interface. This
634 is an attempt to catch PCIE-813 on pass 1 parts */
635 pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
636 if (pescx_bist_status2.u64)
638 cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
642 /* Check BIST status */
643 pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
644 if (pescx_bist_status.u64)
645 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
647 /* Initialize the config space CSRs */
648 __cvmx_pcie_rc_initialize_config_space(pcie_port);
650 /* Bring the link up */
651 if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
653 cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
657 /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
658 npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
659 npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
660 npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
661 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
663 /* Setup Mem access SubDIDs */
664 mem_access_subid.u64 = 0;
665 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
666 mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
667 mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
668 mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
669 mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
670 mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
671 mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
672 mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
673 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
675 /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
676 for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
678 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
679 mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
682 /* Disable the peer to peer forwarding register. This must be setup
683 by the OS after it enumerates the bus and assigns addresses to the
687 cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
688 cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
691 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
692 cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
694 /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
695 cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
698 bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
699 bar1_index.s.ca = 1; /* Not Cached */
700 bar1_index.s.end_swp = 1; /* Endian Swap mode */
701 bar1_index.s.addr_v = 1; /* Valid entry */
703 base = pcie_port ? 16 : 0;
705 /* Big endian swizzle for 32-bit PEXP_NCB register. */
711 for (i = 0; i < 16; i++) {
712 cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
714 /* 256MB / 16 >> 22 == 4 */
715 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
718 /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
719 where they overlap. It also overlaps with the device addresses, so
720 make sure the peer to peer forwarding is set right */
721 cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
723 /* Setup BAR2 attributes */
724 /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
725 /* Â PTLP_RO,CTLP_RO should normally be set (except for debug). */
726 /* Â WAIT_COM=0 will likely work for all applications. */
727 /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
730 cvmx_npei_ctl_port1_t npei_ctl_port;
731 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
732 npei_ctl_port.s.bar2_enb = 1;
733 npei_ctl_port.s.bar2_esx = 1;
734 npei_ctl_port.s.bar2_cax = 0;
735 npei_ctl_port.s.ptlp_ro = 1;
736 npei_ctl_port.s.ctlp_ro = 1;
737 npei_ctl_port.s.wait_com = 0;
738 npei_ctl_port.s.waitl_com = 0;
739 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
743 cvmx_npei_ctl_port0_t npei_ctl_port;
744 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
745 npei_ctl_port.s.bar2_enb = 1;
746 npei_ctl_port.s.bar2_esx = 1;
747 npei_ctl_port.s.bar2_cax = 0;
748 npei_ctl_port.s.ptlp_ro = 1;
749 npei_ctl_port.s.ctlp_ro = 1;
750 npei_ctl_port.s.wait_com = 0;
751 npei_ctl_port.s.waitl_com = 0;
752 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
755 /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
756 TLP ordering to not be preserved after multiple PCIe port resets. This
757 code detects this fault and corrects it by aligning the TLP counters
758 properly. Another link reset is then performed. See PCIE-13340 */
759 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
760 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
762 cvmx_npei_dbg_data_t dbg_data;
763 int old_in_fif_p_count;
766 int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
769 /* Choose a write address of 1MB. It should be harmless as all bars
770 haven't been setup */
771 uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
773 /* Make sure at least in_p_offset have been executed before we try and
774 read in_fif_p_count */
778 cvmx_write64_uint32(write_address, 0);
782 /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
783 unstable sometimes so read it twice with a write between the reads.
784 This way we can tell the value is good as it will increment by one
786 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
787 cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
790 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
791 old_in_fif_p_count = dbg_data.s.data & 0xff;
792 cvmx_write64_uint32(write_address, 0);
794 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
795 in_fif_p_count = dbg_data.s.data & 0xff;
796 } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
798 /* Update in_fif_p_count for it's offset with respect to out_p_count */
799 in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
801 /* Read the OUT_P_COUNT from the debug select */
802 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
803 cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
804 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
805 out_p_count = (dbg_data.s.data>>1) & 0xff;
807 /* Check that the two counters are aligned */
808 if (out_p_count != in_fif_p_count)
810 cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
811 while (in_fif_p_count != 0)
813 cvmx_write64_uint32(write_address, 0);
815 in_fif_p_count = (in_fif_p_count + 1) & 0xff;
817 /* The EBH5200 board swapped the PCIe reset lines on the board. This
818 means we must bring both links down and up, which will cause the
819 PCIe0 to need alignment again. Lots of messages will be displayed,
820 but everything should work */
821 if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
823 cvmx_pcie_rc_initialize(0);
824 /* Rety bringing this port up */
829 /* Display the link status */
830 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
831 cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
838 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
839 * port from reset to a link up state. Software can then begin
840 * configuring the rest of the link.
842 * @param pcie_port PCIe port to initialize
844 * @return Zero on success
846 static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
848 uint64_t start_cycle;
849 cvmx_pemx_ctl_status_t pem_ctl_status;
850 cvmx_pciercx_cfg032_t pciercx_cfg032;
851 cvmx_pciercx_cfg448_t pciercx_cfg448;
853 /* Bring up the link */
854 pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
855 pem_ctl_status.s.lnk_enb = 1;
856 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
858 /* Wait for the link to come up */
859 start_cycle = cvmx_get_cycle();
862 if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
865 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
866 } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
868 /* Update the Replay Time Limit. Empirically, some PCIe devices take a
869 little longer to respond than expected under load. As a workaround for
870 this we configure the Replay Time Limit to the value expected for a 512
871 byte MPS instead of our actual 256 byte MPS. The numbers below are
872 directly from the PCIe spec table 3-4 */
873 pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
874 switch (pciercx_cfg032.s.nlw)
877 pciercx_cfg448.s.rtl = 1677;
879 case 2: /* 2 lanes */
880 pciercx_cfg448.s.rtl = 867;
882 case 4: /* 4 lanes */
883 pciercx_cfg448.s.rtl = 462;
885 case 8: /* 8 lanes */
886 pciercx_cfg448.s.rtl = 258;
889 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
896 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
899 * @param pcie_port PCIe port to initialize
901 * @return Zero on success
903 static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
906 cvmx_ciu_soft_prst_t ciu_soft_prst;
907 cvmx_mio_rst_ctlx_t mio_rst_ctl;
908 cvmx_pemx_bar_ctl_t pemx_bar_ctl;
909 cvmx_pemx_ctl_status_t pemx_ctl_status;
910 cvmx_pemx_bist_status_t pemx_bist_status;
911 cvmx_pemx_bist_status2_t pemx_bist_status2;
912 cvmx_pciercx_cfg032_t pciercx_cfg032;
913 cvmx_pciercx_cfg515_t pciercx_cfg515;
914 cvmx_sli_ctl_portx_t sli_ctl_portx;
915 cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
916 cvmx_sli_mem_access_subidx_t mem_access_subid;
917 cvmx_pemx_bar1_indexx_t bar1_index;
920 /* Make sure this interface is PCIe */
921 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
923 /* Requires reading the MIO_QLMX_CFG register to figure
924 out the port type. */
927 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
928 qlm = 3 - (pcie_port * 2);
929 else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
931 cvmx_mio_qlmx_cfg_t qlm_cfg;
932 qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
933 if (qlm_cfg.s.qlm_cfg == 1)
936 /* PCIe is allowed only in QLM1, 1 PCIe port in x2 or
937 2 PCIe ports in x1 */
938 else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
940 status = cvmx_qlm_get_status(qlm);
941 if (status == 4 || status == 5)
943 cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
948 cvmx_dprintf("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
953 cvmx_dprintf("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
958 cvmx_dprintf("PCIe: Port %d is unknown, skipping.\n", pcie_port);
964 /* This code is so that the PCIe analyzer is able to see 63XX traffic */
965 cvmx_dprintf("PCIE : init for pcie analyzer.\n");
966 cvmx_helper_qlm_jtag_init();
967 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
968 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
969 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
970 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
971 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
972 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
973 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
974 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
975 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
976 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
977 cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
978 cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
979 cvmx_helper_qlm_jtag_update(pcie_port);
982 /* Make sure we aren't trying to setup a target mode interface in host mode */
983 mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
984 ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX || OCTEON_IS_MODEL(OCTEON_CNF71XX)) ? (mio_rst_ctl.s.prtmode != 1) : (!mio_rst_ctl.s.host_mode));
987 cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
991 /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
992 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
996 cvmx_ciu_qlm1_t ciu_qlm;
997 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
998 ciu_qlm.s.txbypass = 1;
999 ciu_qlm.s.txdeemph = 5;
1000 ciu_qlm.s.txmargin = 0x17;
1001 cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
1005 cvmx_ciu_qlm0_t ciu_qlm;
1006 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1007 ciu_qlm.s.txbypass = 1;
1008 ciu_qlm.s.txdeemph = 5;
1009 ciu_qlm.s.txmargin = 0x17;
1010 cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1013 /* Bring the PCIe out of reset */
1015 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1017 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1018 /* After a chip reset the PCIe will also be in reset. If it isn't,
1019 most likely someone is trying to init it again without a proper
1021 if (ciu_soft_prst.s.soft_prst == 0)
1023 /* Reset the port */
1024 ciu_soft_prst.s.soft_prst = 1;
1026 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1028 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1029 /* Wait until pcie resets the ports. */
1030 cvmx_wait_usec(2000);
1034 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1035 ciu_soft_prst.s.soft_prst = 0;
1036 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1040 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1041 ciu_soft_prst.s.soft_prst = 0;
1042 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1045 /* Wait for PCIe reset to complete */
1046 cvmx_wait_usec(1000);
1048 /* Check and make sure PCIe came out of reset. If it doesn't the board
1049 probably hasn't wired the clocks up and the interface should be
1051 if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), cvmx_mio_rst_ctlx_t, rst_done, ==, 1, 10000))
1053 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
1057 /* Check BIST status */
1058 pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
1059 if (pemx_bist_status.u64)
1060 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
1061 pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
1062 /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
1063 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
1064 pemx_bist_status2.u64 &= ~0x3full;
1065 if (pemx_bist_status2.u64)
1066 cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
1068 /* Initialize the config space CSRs */
1069 __cvmx_pcie_rc_initialize_config_space(pcie_port);
1071 /* Enable gen2 speed selection */
1072 pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
1073 pciercx_cfg515.s.dsc = 1;
1074 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
1076 /* Bring the link up */
1077 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1079 /* Some gen1 devices don't handle the gen 2 training correctly. Disable
1080 gen2 and try again with only gen1 */
1081 cvmx_pciercx_cfg031_t pciercx_cfg031;
1082 pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1083 pciercx_cfg031.s.mls = 1;
1084 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
1085 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1087 cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1092 /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1093 sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1094 sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
1095 sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
1096 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1098 /* Setup Mem access SubDIDs */
1099 mem_access_subid.u64 = 0;
1100 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1101 mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
1102 mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
1103 mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
1104 mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1105 mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1106 /* PCIe Adddress Bits <63:34>. */
1107 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1108 mem_access_subid.cn68xx.ba = 0;
1110 mem_access_subid.cn63xx.ba = 0;
1112 /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
1113 for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
1115 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1116 /* Set each SUBID to extend the addressable range */
1117 __cvmx_increment_ba(&mem_access_subid);
1120 if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
1122 /* Disable the peer to peer forwarding register. This must be setup
1123 by the OS after it enumerates the bus and assigns addresses to the
1127 cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1128 cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1132 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
1133 cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1135 /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
1136 where they overlap. It also overlaps with the device addresses, so
1137 make sure the peer to peer forwarding is set right */
1138 cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1140 /* Setup BAR2 attributes */
1141 /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
1142 /* Â PTLP_RO,CTLP_RO should normally be set (except for debug). */
1143 /* Â WAIT_COM=0 will likely work for all applications. */
1144 /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
1145 pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1146 pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/
1147 pemx_bar_ctl.s.bar2_enb = 1;
1148 pemx_bar_ctl.s.bar2_esx = 1;
1149 pemx_bar_ctl.s.bar2_cax = 0;
1150 cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1151 sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1152 sli_ctl_portx.s.ptlp_ro = 1;
1153 sli_ctl_portx.s.ctlp_ro = 1;
1154 sli_ctl_portx.s.wait_com = 0;
1155 sli_ctl_portx.s.waitl_com = 0;
1156 cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1158 /* BAR1 follows BAR2 */
1159 cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1162 bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1163 bar1_index.s.ca = 1; /* Not Cached */
1164 bar1_index.s.end_swp = 1; /* Endian Swap mode */
1165 bar1_index.s.addr_v = 1; /* Valid entry */
1167 for (i = 0; i < 16; i++) {
1168 cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1169 /* 256MB / 16 >> 22 == 4 */
1170 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1173 /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
1175 pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1176 pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1177 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
1179 /* Display the link status */
1180 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1181 cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1187 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1189 * @param pcie_port PCIe port to initialize
1191 * @return Zero on success
1193 int cvmx_pcie_rc_initialize(int pcie_port)
1196 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1197 result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1199 result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1200 #if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
1202 cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1209 * Shutdown a PCIe port and put it in reset
1211 * @param pcie_port PCIe port to shutdown
1213 * @return Zero on success
1215 int cvmx_pcie_rc_shutdown(int pcie_port)
1217 #if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
1218 cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1220 /* Wait for all pending operations to complete */
1221 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1223 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
1224 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1228 if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
1229 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1235 cvmx_ciu_soft_prst_t ciu_soft_prst;
1236 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1237 ciu_soft_prst.s.soft_prst = 1;
1238 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1242 cvmx_ciu_soft_prst_t ciu_soft_prst;
1243 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1244 ciu_soft_prst.s.soft_prst = 1;
1245 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1253 * Build a PCIe config space request address for a device
1255 * @param pcie_port PCIe port to access
1256 * @param bus Sub bus
1257 * @param dev Device ID
1258 * @param fn Device sub function
1259 * @param reg Register to access
1261 * @return 64bit Octeon IO address
1263 static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
1265 cvmx_pcie_address_t pcie_addr;
1266 cvmx_pciercx_cfg006_t pciercx_cfg006;
1268 pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
1269 if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
1273 pcie_addr.config.upper = 2;
1274 pcie_addr.config.io = 1;
1275 pcie_addr.config.did = 3;
1276 pcie_addr.config.subdid = 1;
1277 pcie_addr.config.es = 1;
1278 pcie_addr.config.port = pcie_port;
1279 pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
1280 pcie_addr.config.bus = bus;
1281 pcie_addr.config.dev = dev;
1282 pcie_addr.config.func = fn;
1283 pcie_addr.config.reg = reg;
1284 return pcie_addr.u64;
/**
 * Read 8bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (all ones when the address is invalid)
 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* A zero address means the request can't map to a device; mimic a
       master abort by returning all ones */
    return address ? cvmx_read64_uint8(address) : 0xff;
}
/**
 * Read 16bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (all ones when the address is invalid)
 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Config space is little endian on the wire; swap to CPU order */
    return address ? cvmx_le16_to_cpu(cvmx_read64_uint16(address)) : 0xffff;
}
/**
 * Read 32bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (all ones when the address is invalid)
 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Config space is little endian on the wire; swap to CPU order */
    return address ? cvmx_le32_to_cpu(cvmx_read64_uint32(address)) : 0xffffffff;
}
/**
 * Write 8bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop writes that don't map to a valid device */
    if (address)
        cvmx_write64_uint8(address, val);
}
/**
 * Write 16bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop writes that don't map to a valid device */
    if (address)
        cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}
/**
 * Write 32bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop writes that don't map to a valid device */
    if (address)
        cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
1408 * Read a PCIe config space register indirectly. This is used for
1409 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1411 * @param pcie_port PCIe port to read from
1412 * @param cfg_offset Address to read
1414 * @return Value read
1416 uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
1418 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1420 cvmx_pescx_cfg_rd_t pescx_cfg_rd;
1421 pescx_cfg_rd.u64 = 0;
1422 pescx_cfg_rd.s.addr = cfg_offset;
1423 cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
1424 pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
1425 return pescx_cfg_rd.s.data;
1429 cvmx_pemx_cfg_rd_t pemx_cfg_rd;
1430 pemx_cfg_rd.u64 = 0;
1431 pemx_cfg_rd.s.addr = cfg_offset;
1432 cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
1433 pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
1434 return pemx_cfg_rd.s.data;
1440 * Write a PCIe config space register indirectly. This is used for
1441 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1443 * @param pcie_port PCIe port to write to
1444 * @param cfg_offset Address to write
1445 * @param val Value to write
1447 void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
1449 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1451 cvmx_pescx_cfg_wr_t pescx_cfg_wr;
1452 pescx_cfg_wr.u64 = 0;
1453 pescx_cfg_wr.s.addr = cfg_offset;
1454 pescx_cfg_wr.s.data = val;
1455 cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
1459 cvmx_pemx_cfg_wr_t pemx_cfg_wr;
1460 pemx_cfg_wr.u64 = 0;
1461 pemx_cfg_wr.s.addr = cfg_offset;
1462 pemx_cfg_wr.s.data = val;
1463 cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
1469 * Initialize a PCIe port for use in target(EP) mode.
1471 * @param pcie_port PCIe port to initialize
1473 * @return Zero on success
1475 int cvmx_pcie_ep_initialize(int pcie_port)
1477 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1479 cvmx_npei_ctl_status_t npei_ctl_status;
1480 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
1481 if (npei_ctl_status.s.host_mode)
1486 cvmx_mio_rst_ctlx_t mio_rst_ctl;
1488 mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
1489 ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX) ? (mio_rst_ctl.s.prtmode != 0) : mio_rst_ctl.s.host_mode);
1494 /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
1495 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
1499 cvmx_ciu_qlm1_t ciu_qlm;
1500 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
1501 ciu_qlm.s.txbypass = 1;
1502 ciu_qlm.s.txdeemph = 5;
1503 ciu_qlm.s.txmargin = 0x17;
1504 cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
1508 cvmx_ciu_qlm0_t ciu_qlm;
1509 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1510 ciu_qlm.s.txbypass = 1;
1511 ciu_qlm.s.txdeemph = 5;
1512 ciu_qlm.s.txmargin = 0x17;
1513 cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1517 /* Enable bus master and memory */
1518 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);
1520 /* Max Payload Size (PCIE*_CFG030[MPS]) */
1521 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
1522 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
1523 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
1525 cvmx_pcieepx_cfg030_t pcieepx_cfg030;
1526 pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
1527 if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
1529 pcieepx_cfg030.s.mps = MPS_CN5XXX;
1530 pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
1534 pcieepx_cfg030.s.mps = MPS_CN6XXX;
1535 pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
1537 pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
1538 pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
1539 pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
1540 pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
1541 pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
1542 pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
1543 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
1546 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1548 /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
1549 /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1550 cvmx_npei_ctl_status2_t npei_ctl_status2;
1551 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
1552 npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
1553 npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
1554 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
1558 /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
1559 /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1560 cvmx_dpi_sli_prtx_cfg_t prt_cfg;
1561 cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
1562 prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
1563 prt_cfg.s.mps = MPS_CN6XXX;
1564 prt_cfg.s.mrrs = MRRS_CN6XXX;
1565 /* Max outstanding load request. */
1566 prt_cfg.s.molr = 32;
1567 cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
1569 sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
1570 sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
1571 cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
1574 /* Setup Mem access SubDID 12 to access Host memory */
1575 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1577 cvmx_npei_mem_access_subidx_t mem_access_subid;
1578 mem_access_subid.u64 = 0;
1579 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1580 mem_access_subid.s.nmerge = 1; /* Merging is not allowed in this window. */
1581 mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
1582 mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
1583 mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
1584 mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
1585 mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
1586 mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
1587 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
1588 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
1592 cvmx_sli_mem_access_subidx_t mem_access_subid;
1593 mem_access_subid.u64 = 0;
1594 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1595 mem_access_subid.s.nmerge = 0; /* Merging is allowed in this window. */
1596 mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
1597 mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
1598 mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1599 mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1600 /* PCIe Adddress Bits <63:34>. */
1601 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1602 mem_access_subid.cn68xx.ba = 0;
1604 mem_access_subid.cn63xx.ba = 0;
1605 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
1612 * Wait for posted PCIe read/writes to reach the other side of
1613 * the internal PCIe switch. This will insure that core
1614 * read/writes are posted before anything after this function
1615 * is called. This may be necessary when writing to memory that
1616 * will later be read using the DMA/PKT engines.
1618 * @param pcie_port PCIe port to wait for
1620 void cvmx_pcie_wait_for_pending(int pcie_port)
1622 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1624 cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1629 /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1630 description of how this code works */
1631 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1634 if (!npei_data_out_cnt.s.p1_fcnt)
1636 a = npei_data_out_cnt.s.p1_ucnt;
1637 b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1641 if (!npei_data_out_cnt.s.p0_fcnt)
1643 a = npei_data_out_cnt.s.p0_ucnt;
1644 b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1649 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1650 c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
1665 cvmx_sli_data_out_cnt_t sli_data_out_cnt;
1670 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1673 if (!sli_data_out_cnt.s.p1_fcnt)
1675 a = sli_data_out_cnt.s.p1_ucnt;
1676 b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1680 if (!sli_data_out_cnt.s.p0_fcnt)
1682 a = sli_data_out_cnt.s.p0_ucnt;
1683 b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1688 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1689 c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;