1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
49 * Interface to PCIe as a host(RC) or target(EP)
51 * <hr>$Revision: 52004 $<hr>
53 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
54 #include <asm/octeon/cvmx.h>
55 #include <asm/octeon/cvmx-config.h>
56 #include <asm/octeon/cvmx-clock.h>
57 #include <asm/octeon/cvmx-ciu-defs.h>
58 #include <asm/octeon/cvmx-dpi-defs.h>
59 #include <asm/octeon/cvmx-npi-defs.h>
60 #include <asm/octeon/cvmx-npei-defs.h>
61 #include <asm/octeon/cvmx-pci-defs.h>
62 #include <asm/octeon/cvmx-pcieepx-defs.h>
63 #include <asm/octeon/cvmx-pciercx-defs.h>
64 #include <asm/octeon/cvmx-pemx-defs.h>
65 #include <asm/octeon/cvmx-pexp-defs.h>
66 #include <asm/octeon/cvmx-pescx-defs.h>
67 #include <asm/octeon/cvmx-sli-defs.h>
68 #include <asm/octeon/cvmx-sriox-defs.h>
70 #ifdef CONFIG_CAVIUM_DECODE_RSL
71 #include <asm/octeon/cvmx-error.h>
73 #include <asm/octeon/cvmx-helper.h>
74 #include <asm/octeon/cvmx-helper-board.h>
75 #include <asm/octeon/cvmx-helper-errata.h>
76 #include <asm/octeon/cvmx-pcie.h>
77 #include <asm/octeon/cvmx-sysinfo.h>
78 #include <asm/octeon/cvmx-swap.h>
79 #include <asm/octeon/cvmx-wqe.h>
82 #include "cvmx-csr-db.h"
83 #include "cvmx-pcie.h"
84 #include "cvmx-sysinfo.h"
85 #include "cvmx-swap.h"
87 #include "cvmx-error.h"
88 #include "cvmx-helper-errata.h"
91 #define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
92 #define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
93 #define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
94 #define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
97 * Return the Core virtual base address for PCIe IO access. IOs are
98 * read/written as an offset from this address.
100 * @param pcie_port PCIe port the IO is for
102 * @return 64bit Octeon IO base address for read/write
104 uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
106 cvmx_pcie_address_t pcie_addr;
108 pcie_addr.io.upper = 0;
110 pcie_addr.io.did = 3;
111 pcie_addr.io.subdid = 2;
113 pcie_addr.io.port = pcie_port;
114 return pcie_addr.u64;
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the IO window
 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    /* Each PCIe port decodes a fixed 4GB IO window; the port number does
       not change the window size, only its base address. */
    return 1ull << 32;
}
133 * Return the Core virtual base address for PCIe MEM access. Memory is
134 * read/written as an offset from this address.
136 * @param pcie_port PCIe port the IO is for
138 * @return 64bit Octeon IO base address for read/write
140 uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
142 cvmx_pcie_address_t pcie_addr;
144 pcie_addr.mem.upper = 0;
145 pcie_addr.mem.io = 1;
146 pcie_addr.mem.did = 3;
147 pcie_addr.mem.subdid = 3 + pcie_port;
148 return pcie_addr.u64;
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return Size of the Mem window
 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    /* Each PCIe port decodes a fixed 64GB memory window regardless of
       the port number. */
    return 1ull << 36;
}
168 * Initialize the RC config space CSRs
170 * @param pcie_port PCIe port to initialize
172 static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
174 /* Max Payload Size (PCIE*_CFG030[MPS]) */
175 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
176 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
177 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
179 cvmx_pciercx_cfg030_t pciercx_cfg030;
180 pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
181 if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
183 pciercx_cfg030.s.mps = MPS_CN5XXX;
184 pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
188 pciercx_cfg030.s.mps = MPS_CN6XXX;
189 pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
191 pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
192 pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
193 pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
194 pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
195 pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
196 pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
197 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
200 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
202 /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
203 /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
204 cvmx_npei_ctl_status2_t npei_ctl_status2;
205 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
206 npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
207 npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
209 npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
211 npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */
213 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
217 /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
218 /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
219 cvmx_dpi_sli_prtx_cfg_t prt_cfg;
220 cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
221 prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
222 prt_cfg.s.mps = MPS_CN6XXX;
223 prt_cfg.s.mrrs = MRRS_CN6XXX;
224 cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
226 sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
227 sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
228 cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
231 /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
233 cvmx_pciercx_cfg070_t pciercx_cfg070;
234 pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
235 pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
236 pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
237 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
240 /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
241 /* ME and MSAE should always be set. */
242 /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
243 /* System Error Message Enable (PCIE*_CFG001[SEE]) */
245 cvmx_pciercx_cfg001_t pciercx_cfg001;
246 pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
247 pciercx_cfg001.s.msae = 1; /* Memory space enable. */
248 pciercx_cfg001.s.me = 1; /* Bus master enable. */
249 pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
250 pciercx_cfg001.s.see = 1; /* SERR# enable */
251 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
255 /* Advanced Error Recovery Message Enables */
256 /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
257 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
258 /* Use CVMX_PCIERCX_CFG067 hardware default */
259 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);
262 /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
264 cvmx_pciercx_cfg032_t pciercx_cfg032;
265 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
266 pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
267 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
270 /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
271 /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
273 /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
274 cvmx_pciercx_cfg006_t pciercx_cfg006;
275 pciercx_cfg006.u32 = 0;
276 pciercx_cfg006.s.pbnum = 1;
277 pciercx_cfg006.s.sbnum = 1;
278 pciercx_cfg006.s.subbnum = 1;
279 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
282 /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
283 /* Most applications should disable the memory-mapped I/O BAR by */
284 /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
286 cvmx_pciercx_cfg008_t pciercx_cfg008;
287 pciercx_cfg008.u32 = 0;
288 pciercx_cfg008.s.mb_addr = 0x100;
289 pciercx_cfg008.s.ml_addr = 0;
290 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
293 /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
294 /* Most applications should disable the prefetchable BAR by setting */
295 /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
296 /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
298 cvmx_pciercx_cfg009_t pciercx_cfg009;
299 cvmx_pciercx_cfg010_t pciercx_cfg010;
300 cvmx_pciercx_cfg011_t pciercx_cfg011;
301 pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
302 pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
303 pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
304 pciercx_cfg009.s.lmem_base = 0x100;
305 pciercx_cfg009.s.lmem_limit = 0;
306 pciercx_cfg010.s.umem_base = 0x100;
307 pciercx_cfg011.s.umem_limit = 0;
308 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
309 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
310 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
313 /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
314 /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
316 cvmx_pciercx_cfg035_t pciercx_cfg035;
317 pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
318 pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
319 pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
320 pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
321 pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
322 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
325 /* Advanced Error Recovery Interrupt Enables */
326 /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
328 cvmx_pciercx_cfg075_t pciercx_cfg075;
329 pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
330 pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
331 pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
332 pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
333 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
336 /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
337 /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
339 cvmx_pciercx_cfg034_t pciercx_cfg034;
340 pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
341 pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
342 pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
343 pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
344 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
350 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
351 * port from reset to a link up state. Software can then begin
352 * configuring the rest of the link.
354 * @param pcie_port PCIe port to initialize
356 * @return Zero on success
358 static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
360 uint64_t start_cycle;
361 cvmx_pescx_ctl_status_t pescx_ctl_status;
362 cvmx_pciercx_cfg452_t pciercx_cfg452;
363 cvmx_pciercx_cfg032_t pciercx_cfg032;
364 cvmx_pciercx_cfg448_t pciercx_cfg448;
366 /* Set the lane width */
367 pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
368 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
369 if (pescx_ctl_status.s.qlm_cfg == 0)
371 /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
372 pciercx_cfg452.s.lme = 0xf;
376 /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
377 pciercx_cfg452.s.lme = 0x7;
379 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);
381 /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
382 cause bus errors on 64bit memory reads. Turning off length error
383 checking fixes this */
384 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
386 cvmx_pciercx_cfg455_t pciercx_cfg455;
387 pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
388 pciercx_cfg455.s.m_cpl_len_err = 1;
389 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
392 /* Lane swap needs to be manually enabled for CN52XX */
393 if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
395 switch (cvmx_sysinfo_get()->board_type)
397 #if defined(OCTEON_VENDOR_LANNER)
398 case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
402 pescx_ctl_status.s.lane_swp = 1;
405 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
408 /* Bring up the link */
409 pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
410 pescx_ctl_status.s.lnk_enb = 1;
411 cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
413 /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
414 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
415 __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);
417 /* Wait for the link to come up */
418 start_cycle = cvmx_get_cycle();
421 if (cvmx_get_cycle() - start_cycle > 2*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
423 cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
427 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
428 } while (pciercx_cfg032.s.dlla == 0);
430 /* Clear all pending errors */
431 cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));
433 /* Update the Replay Time Limit. Empirically, some PCIe devices take a
434 little longer to respond than expected under load. As a workaround for
435 this we configure the Replay Time Limit to the value expected for a 512
436 byte MPS instead of our actual 256 byte MPS. The numbers below are
437 directly from the PCIe spec table 3-4 */
438 pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
439 switch (pciercx_cfg032.s.nlw)
442 pciercx_cfg448.s.rtl = 1677;
444 case 2: /* 2 lanes */
445 pciercx_cfg448.s.rtl = 867;
447 case 4: /* 4 lanes */
448 pciercx_cfg448.s.rtl = 462;
450 case 8: /* 8 lanes */
451 pciercx_cfg448.s.rtl = 258;
454 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
461 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
464 * @param pcie_port PCIe port to initialize
466 * @return Zero on success
468 static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
472 uint64_t addr_swizzle;
473 cvmx_ciu_soft_prst_t ciu_soft_prst;
474 cvmx_pescx_bist_status_t pescx_bist_status;
475 cvmx_pescx_bist_status2_t pescx_bist_status2;
476 cvmx_npei_ctl_status_t npei_ctl_status;
477 cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
478 cvmx_npei_mem_access_subidx_t mem_access_subid;
479 cvmx_npei_dbg_data_t npei_dbg_data;
480 cvmx_pescx_ctl_status2_t pescx_ctl_status2;
481 cvmx_pciercx_cfg032_t pciercx_cfg032;
482 cvmx_npei_bar1_indexx_t bar1_index;
485 /* Make sure we aren't trying to setup a target mode interface in host mode */
486 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
487 if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
489 cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
493 /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
494 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
496 npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
497 if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
499 cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
504 /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
505 npei_ctl_status.s.arb = 1;
506 /* Allow up to 0x20 config retries */
507 npei_ctl_status.s.cfg_rtry = 0x20;
508 /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
509 if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
511 npei_ctl_status.s.p0_ntags = 0x20;
512 npei_ctl_status.s.p1_ntags = 0x20;
514 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);
516 /* Bring the PCIe out of reset */
517 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
519 /* The EBH5200 board swapped the PCIe reset lines on the board. As a
520 workaround for this bug, we bring both PCIe ports out of reset at
521 the same time instead of on separate calls. So for port 0, we bring
522 both out of reset and do nothing on port 1 */
525 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
526 /* After a chip reset the PCIe will also be in reset. If it isn't,
527 most likely someone is trying to init it again without a proper
529 if (ciu_soft_prst.s.soft_prst == 0)
531 /* Reset the ports */
532 ciu_soft_prst.s.soft_prst = 1;
533 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
534 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
535 ciu_soft_prst.s.soft_prst = 1;
536 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
537 /* Wait until pcie resets the ports. */
538 cvmx_wait_usec(2000);
540 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
541 ciu_soft_prst.s.soft_prst = 0;
542 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
543 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
544 ciu_soft_prst.s.soft_prst = 0;
545 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
550 /* The normal case: The PCIe ports are completely separate and can be
551 brought out of reset independently */
553 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
555 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
556 /* After a chip reset the PCIe will also be in reset. If it isn't,
557 most likely someone is trying to init it again without a proper
559 if (ciu_soft_prst.s.soft_prst == 0)
562 ciu_soft_prst.s.soft_prst = 1;
564 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
566 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
567 /* Wait until pcie resets the ports. */
568 cvmx_wait_usec(2000);
572 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
573 ciu_soft_prst.s.soft_prst = 0;
574 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
578 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
579 ciu_soft_prst.s.soft_prst = 0;
580 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
584 /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
585 PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
588 /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
589 CN52XX, so we only probe it on newer chips */
590 if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
592 /* Clear PCLK_RUN so we can check if the clock is running */
593 pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
594 pescx_ctl_status2.s.pclk_run = 1;
595 cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
596 /* Now that we cleared PCLK_RUN, wait for it to be set again telling
597 us the clock is running */
598 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
599 cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
601 cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
606 /* Check and make sure PCIe came out of reset. If it doesn't the board
607 probably hasn't wired the clocks up and the interface should be
609 pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
610 if (pescx_ctl_status2.s.pcierst)
612 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
616 /* Check BIST2 status. If any bits are set skip this interface. This
617 is an attempt to catch PCIE-813 on pass 1 parts */
618 pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
619 if (pescx_bist_status2.u64)
621 cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
625 /* Check BIST status */
626 pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
627 if (pescx_bist_status.u64)
628 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));
630 /* Initialize the config space CSRs */
631 __cvmx_pcie_rc_initialize_config_space(pcie_port);
633 /* Bring the link up */
634 if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
636 cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
640 /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
641 npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
642 npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
643 npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
644 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
646 /* Setup Mem access SubDIDs */
647 mem_access_subid.u64 = 0;
648 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
649 mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
650 mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
651 mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
652 mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
653 mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
654 mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
655 mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
656 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
658 /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
659 for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
661 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
662 mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
665 /* Disable the peer to peer forwarding register. This must be setup
666 by the OS after it enumerates the bus and assigns addresses to the
670 cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
671 cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
674 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
675 cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);
677 /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
678 cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
681 bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
682 bar1_index.s.ca = 1; /* Not Cached */
683 bar1_index.s.end_swp = 1; /* Endian Swap mode */
684 bar1_index.s.addr_v = 1; /* Valid entry */
686 base = pcie_port ? 16 : 0;
688 /* Big endian swizzle for 32-bit PEXP_NCB register. */
694 for (i = 0; i < 16; i++) {
695 cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
697 /* 256MB / 16 >> 22 == 4 */
698 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
701 /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
702 where they overlap. It also overlaps with the device addresses, so
703 make sure the peer to peer forwarding is set right */
704 cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);
706 /* Setup BAR2 attributes */
707 /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
708 /* Â PTLP_RO,CTLP_RO should normally be set (except for debug). */
709 /* Â WAIT_COM=0 will likely work for all applications. */
710 /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
713 cvmx_npei_ctl_port1_t npei_ctl_port;
714 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
715 npei_ctl_port.s.bar2_enb = 1;
716 npei_ctl_port.s.bar2_esx = 1;
717 npei_ctl_port.s.bar2_cax = 0;
718 npei_ctl_port.s.ptlp_ro = 1;
719 npei_ctl_port.s.ctlp_ro = 1;
720 npei_ctl_port.s.wait_com = 0;
721 npei_ctl_port.s.waitl_com = 0;
722 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
726 cvmx_npei_ctl_port0_t npei_ctl_port;
727 npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
728 npei_ctl_port.s.bar2_enb = 1;
729 npei_ctl_port.s.bar2_esx = 1;
730 npei_ctl_port.s.bar2_cax = 0;
731 npei_ctl_port.s.ptlp_ro = 1;
732 npei_ctl_port.s.ctlp_ro = 1;
733 npei_ctl_port.s.wait_com = 0;
734 npei_ctl_port.s.waitl_com = 0;
735 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
738 /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
739 TLP ordering to not be preserved after multiple PCIe port resets. This
740 code detects this fault and corrects it by aligning the TLP counters
741 properly. Another link reset is then performed. See PCIE-13340 */
742 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
743 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
745 cvmx_npei_dbg_data_t dbg_data;
746 int old_in_fif_p_count;
749 int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
752 /* Choose a write address of 1MB. It should be harmless as all bars
753 haven't been setup */
754 uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
756 /* Make sure at least in_p_offset have been executed before we try and
757 read in_fif_p_count */
761 cvmx_write64_uint32(write_address, 0);
765 /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
766 unstable sometimes so read it twice with a write between the reads.
767 This way we can tell the value is good as it will increment by one
769 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
770 cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
773 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
774 old_in_fif_p_count = dbg_data.s.data & 0xff;
775 cvmx_write64_uint32(write_address, 0);
777 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
778 in_fif_p_count = dbg_data.s.data & 0xff;
779 } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
781 /* Update in_fif_p_count for it's offset with respect to out_p_count */
782 in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
784 /* Read the OUT_P_COUNT from the debug select */
785 cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
786 cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
787 dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
788 out_p_count = (dbg_data.s.data>>1) & 0xff;
790 /* Check that the two counters are aligned */
791 if (out_p_count != in_fif_p_count)
793 cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
794 while (in_fif_p_count != 0)
796 cvmx_write64_uint32(write_address, 0);
798 in_fif_p_count = (in_fif_p_count + 1) & 0xff;
800 /* The EBH5200 board swapped the PCIe reset lines on the board. This
801 means we must bring both links down and up, which will cause the
802 PCIe0 to need alignment again. Lots of messages will be displayed,
803 but everything should work */
804 if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
806 cvmx_pcie_rc_initialize(0);
807 /* Rety bringing this port up */
812 /* Display the link status */
813 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
814 cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
822 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
823 * port from reset to a link up state. Software can then begin
824 * configuring the rest of the link.
826 * @param pcie_port PCIe port to initialize
828 * @return Zero on success
830 static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
832 uint64_t start_cycle;
833 cvmx_pemx_ctl_status_t pem_ctl_status;
834 cvmx_pciercx_cfg032_t pciercx_cfg032;
835 cvmx_pciercx_cfg448_t pciercx_cfg448;
837 /* Bring up the link */
838 pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
839 pem_ctl_status.s.lnk_enb = 1;
840 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
842 /* Wait for the link to come up */
843 start_cycle = cvmx_get_cycle();
846 if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
849 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
850 } while (pciercx_cfg032.s.dlla == 0);
852 /* Update the Replay Time Limit. Empirically, some PCIe devices take a
853 little longer to respond than expected under load. As a workaround for
854 this we configure the Replay Time Limit to the value expected for a 512
855 byte MPS instead of our actual 256 byte MPS. The numbers below are
856 directly from the PCIe spec table 3-4 */
857 pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
858 switch (pciercx_cfg032.s.nlw)
861 pciercx_cfg448.s.rtl = 1677;
863 case 2: /* 2 lanes */
864 pciercx_cfg448.s.rtl = 867;
866 case 4: /* 4 lanes */
867 pciercx_cfg448.s.rtl = 462;
869 case 8: /* 8 lanes */
870 pciercx_cfg448.s.rtl = 258;
873 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
880 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
883 * @param pcie_port PCIe port to initialize
885 * @return Zero on success
887 static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
890 cvmx_ciu_soft_prst_t ciu_soft_prst;
891 cvmx_mio_rst_ctlx_t mio_rst_ctl;
892 cvmx_pemx_bar_ctl_t pemx_bar_ctl;
893 cvmx_pemx_ctl_status_t pemx_ctl_status;
894 cvmx_pemx_bist_status_t pemx_bist_status;
895 cvmx_pemx_bist_status2_t pemx_bist_status2;
896 cvmx_pciercx_cfg032_t pciercx_cfg032;
897 cvmx_pciercx_cfg515_t pciercx_cfg515;
898 cvmx_sli_ctl_portx_t sli_ctl_portx;
899 cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
900 cvmx_sli_mem_access_subidx_t mem_access_subid;
901 cvmx_mio_rst_ctlx_t mio_rst_ctlx;
902 cvmx_sriox_status_reg_t sriox_status_reg;
903 cvmx_pemx_bar1_indexx_t bar1_index;
905 /* Make sure this interface isn't SRIO */
906 sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
907 if (sriox_status_reg.s.srio)
909 cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
913 /* Make sure we aren't trying to setup a target mode interface in host mode */
914 mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
915 if (!mio_rst_ctl.s.host_mode)
917 cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
921 /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
922 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
926 cvmx_ciu_qlm1_t ciu_qlm;
927 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
928 ciu_qlm.s.txbypass = 1;
929 ciu_qlm.s.txdeemph = 5;
930 ciu_qlm.s.txmargin = 0x17;
931 cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
935 cvmx_ciu_qlm0_t ciu_qlm;
936 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
937 ciu_qlm.s.txbypass = 1;
938 ciu_qlm.s.txdeemph = 5;
939 ciu_qlm.s.txmargin = 0x17;
940 cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
944 /* Bring the PCIe out of reset */
946 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
948 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
949 /* After a chip reset the PCIe will also be in reset. If it isn't,
950 most likely someone is trying to init it again without a proper
952 if (ciu_soft_prst.s.soft_prst == 0)
955 ciu_soft_prst.s.soft_prst = 1;
957 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
959 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
960 /* Wait until pcie resets the ports. */
961 cvmx_wait_usec(2000);
965 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
966 ciu_soft_prst.s.soft_prst = 0;
967 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
971 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
972 ciu_soft_prst.s.soft_prst = 0;
973 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
976 /* Wait for PCIe reset to complete */
977 cvmx_wait_usec(1000);
979 /* Check and make sure PCIe came out of reset. If it doesn't the board
980 probably hasn't wired the clocks up and the interface should be
982 mio_rst_ctlx.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
983 if (!mio_rst_ctlx.s.rst_done)
985 cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
989 /* Check BIST status */
990 pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
991 if (pemx_bist_status.u64)
992 cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
993 pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
994 if (pemx_bist_status2.u64)
995 cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
997 /* Initialize the config space CSRs */
998 __cvmx_pcie_rc_initialize_config_space(pcie_port);
1000 /* Enable gen2 speed selection */
1001 pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
1002 pciercx_cfg515.s.dsc = 1;
1003 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
1005 /* Bring the link up */
1006 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1008 /* Some gen1 devices don't handle the gen 2 training correctly. Disable
1009 gen2 and try again with only gen1 */
1010 cvmx_pciercx_cfg031_t pciercx_cfg031;
1011 pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1012 pciercx_cfg031.s.mls = 1;
1013 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg515.u32);
1014 if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
1016 cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1021 /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1022 sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1023 sli_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
1024 sli_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
1025 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1027 /* Setup Mem access SubDIDs */
1028 mem_access_subid.u64 = 0;
1029 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1030 mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
1031 mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
1032 mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
1033 mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1034 mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1035 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
1037 /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
1038 for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
1040 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1041 mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
1044 /* Disable the peer to peer forwarding register. This must be setup
1045 by the OS after it enumerates the bus and assigns addresses to the
1049 cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1050 cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1053 /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
1054 cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1056 /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
1057 where they overlap. It also overlaps with the device addresses, so
1058 make sure the peer to peer forwarding is set right */
1059 cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1061 /* Setup BAR2 attributes */
1062 /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
1063 /* Â PTLP_RO,CTLP_RO should normally be set (except for debug). */
1064 /* Â WAIT_COM=0 will likely work for all applications. */
1065 /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
1066 pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1067 pemx_bar_ctl.s.bar1_siz = 3; /* 256MB BAR1*/
1068 pemx_bar_ctl.s.bar2_enb = 1;
1069 pemx_bar_ctl.s.bar2_esx = 1;
1070 pemx_bar_ctl.s.bar2_cax = 0;
1071 cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1072 sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1073 sli_ctl_portx.s.ptlp_ro = 1;
1074 sli_ctl_portx.s.ctlp_ro = 1;
1075 sli_ctl_portx.s.wait_com = 0;
1076 sli_ctl_portx.s.waitl_com = 0;
1077 cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1079 /* BAR1 follows BAR2 */
1080 cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1083 bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1084 bar1_index.s.ca = 1; /* Not Cached */
1085 bar1_index.s.end_swp = 1; /* Endian Swap mode */
1086 bar1_index.s.addr_v = 1; /* Valid entry */
1088 for (i = 0; i < 16; i++) {
1089 cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1090 /* 256MB / 16 >> 22 == 4 */
1091 bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1094 /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
1096 pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1097 pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1098 cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
1100 /* Display the link status */
1101 pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1102 cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1108 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1110 * @param pcie_port PCIe port to initialize
1112 * @return Zero on success
1114 int cvmx_pcie_rc_initialize(int pcie_port)
1117 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1118 result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1120 result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1121 #if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1123 cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1130 * Shutdown a PCIe port and put it in reset
1132 * @param pcie_port PCIe port to shutdown
1134 * @return Zero on success
1136 int cvmx_pcie_rc_shutdown(int pcie_port)
1138 #if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CONFIG_CAVIUM_DECODE_RSL)
1139 cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
1141 /* Wait for all pending operations to complete */
1142 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1144 if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
1145 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1149 if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
1150 cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
1156 cvmx_ciu_soft_prst_t ciu_soft_prst;
1157 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1158 ciu_soft_prst.s.soft_prst = 1;
1159 cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1163 cvmx_ciu_soft_prst_t ciu_soft_prst;
1164 ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1165 ciu_soft_prst.s.soft_prst = 1;
1166 cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1174 * Build a PCIe config space request address for a device
1176 * @param pcie_port PCIe port to access
1177 * @param bus Sub bus
1178 * @param dev Device ID
1179 * @param fn Device sub function
1180 * @param reg Register to access
1182 * @return 64bit Octeon IO address
1184 static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
1186 cvmx_pcie_address_t pcie_addr;
1187 cvmx_pciercx_cfg006_t pciercx_cfg006;
1189 pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
1190 if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
1194 pcie_addr.config.upper = 2;
1195 pcie_addr.config.io = 1;
1196 pcie_addr.config.did = 3;
1197 pcie_addr.config.subdid = 1;
1198 pcie_addr.config.es = 1;
1199 pcie_addr.config.port = pcie_port;
1200 pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
1201 pcie_addr.config.bus = bus;
1202 pcie_addr.config.dev = dev;
1203 pcie_addr.config.func = fn;
1204 pcie_addr.config.reg = reg;
1205 return pcie_addr.u64;
/**
 * Read 8bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (0xff if the device is not accessible)
 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* A zero address means the bus/dev/fn combination isn't accessible.
       Return all ones, matching the PCI convention for a master abort,
       instead of reading from IO address 0. */
    if (address)
        return cvmx_read64_uint8(address);
    else
        return 0xff;
}
/**
 * Read 16bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (0xffff if the device is not accessible)
 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Zero address = inaccessible device: return all ones (master abort) */
    if (address)
        return cvmx_le16_to_cpu(cvmx_read64_uint16(address));
    else
        return 0xffff;
}
/**
 * Read 32bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read (0xffffffff if the device is not accessible)
 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Zero address = inaccessible device: return all ones (master abort) */
    if (address)
        return cvmx_le32_to_cpu(cvmx_read64_uint32(address));
    else
        return 0xffffffff;
}
/**
 * Write 8bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop writes to inaccessible devices (zero address) rather
       than writing to IO address 0 */
    if (address)
        cvmx_write64_uint8(address, val);
}
/**
 * Write 16bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop writes to inaccessible devices (zero address) */
    if (address)
        cvmx_write64_uint16(address, cvmx_cpu_to_le16(val));
}
/**
 * Write 32bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t address = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Silently drop writes to inaccessible devices (zero address) */
    if (address)
        cvmx_write64_uint32(address, cvmx_cpu_to_le32(val));
}
1327 * Read a PCIe config space register indirectly. This is used for
1328 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1330 * @param pcie_port PCIe port to read from
1331 * @param cfg_offset Address to read
1333 * @return Value read
1335 uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
1337 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1339 cvmx_pescx_cfg_rd_t pescx_cfg_rd;
1340 pescx_cfg_rd.u64 = 0;
1341 pescx_cfg_rd.s.addr = cfg_offset;
1342 cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
1343 pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
1344 return pescx_cfg_rd.s.data;
1348 cvmx_pemx_cfg_rd_t pemx_cfg_rd;
1349 pemx_cfg_rd.u64 = 0;
1350 pemx_cfg_rd.s.addr = cfg_offset;
1351 cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
1352 pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
1353 return pemx_cfg_rd.s.data;
1359 * Write a PCIe config space register indirectly. This is used for
1360 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
1362 * @param pcie_port PCIe port to write to
1363 * @param cfg_offset Address to write
1364 * @param val Value to write
1366 void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
1368 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1370 cvmx_pescx_cfg_wr_t pescx_cfg_wr;
1371 pescx_cfg_wr.u64 = 0;
1372 pescx_cfg_wr.s.addr = cfg_offset;
1373 pescx_cfg_wr.s.data = val;
1374 cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
1378 cvmx_pemx_cfg_wr_t pemx_cfg_wr;
1379 pemx_cfg_wr.u64 = 0;
1380 pemx_cfg_wr.s.addr = cfg_offset;
1381 pemx_cfg_wr.s.data = val;
1382 cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
1388 * Initialize a PCIe port for use in target(EP) mode.
1390 * @param pcie_port PCIe port to initialize
1392 * @return Zero on success
1394 int cvmx_pcie_ep_initialize(int pcie_port)
1396 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1398 cvmx_npei_ctl_status_t npei_ctl_status;
1399 npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
1400 if (npei_ctl_status.s.host_mode)
1405 cvmx_mio_rst_ctlx_t mio_rst_ctl;
1406 mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
1407 if (mio_rst_ctl.s.host_mode)
1411 /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
1412 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
1416 cvmx_ciu_qlm1_t ciu_qlm;
1417 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
1418 ciu_qlm.s.txbypass = 1;
1419 ciu_qlm.s.txdeemph = 5;
1420 ciu_qlm.s.txmargin = 0x17;
1421 cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
1425 cvmx_ciu_qlm0_t ciu_qlm;
1426 ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1427 ciu_qlm.s.txbypass = 1;
1428 ciu_qlm.s.txdeemph = 5;
1429 ciu_qlm.s.txmargin = 0x17;
1430 cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1434 /* Enable bus master and memory */
1435 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);
1437 /* Max Payload Size (PCIE*_CFG030[MPS]) */
1438 /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
1439 /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
1440 /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
1442 cvmx_pcieepx_cfg030_t pcieepx_cfg030;
1443 pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
1444 if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
1446 pcieepx_cfg030.s.mps = MPS_CN5XXX;
1447 pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
1451 pcieepx_cfg030.s.mps = MPS_CN6XXX;
1452 pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
1454 pcieepx_cfg030.s.ro_en = 1; /* Enable relaxed ordering. */
1455 pcieepx_cfg030.s.ns_en = 1; /* Enable no snoop. */
1456 pcieepx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
1457 pcieepx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
1458 pcieepx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
1459 pcieepx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
1460 cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
1463 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1465 /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
1466 /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1467 cvmx_npei_ctl_status2_t npei_ctl_status2;
1468 npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
1469 npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes (Limit of most PCs) */
1470 npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
1471 cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
1475 /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
1476 /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
1477 cvmx_dpi_sli_prtx_cfg_t prt_cfg;
1478 cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
1479 prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
1480 prt_cfg.s.mps = MPS_CN6XXX;
1481 prt_cfg.s.mrrs = MRRS_CN6XXX;
1482 cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
1484 sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
1485 sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
1486 cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
1489 /* Setup Mem access SubDID 12 to access Host memory */
1490 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1492 cvmx_npei_mem_access_subidx_t mem_access_subid;
1493 mem_access_subid.u64 = 0;
1494 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1495 mem_access_subid.s.nmerge = 1; /* Merging is not allowed in this window. */
1496 mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
1497 mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
1498 mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
1499 mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
1500 mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
1501 mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
1502 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
1503 cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
1507 cvmx_sli_mem_access_subidx_t mem_access_subid;
1508 mem_access_subid.u64 = 0;
1509 mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1510 mem_access_subid.s.nmerge = 0; /* Merging is allowed in this window. */
1511 mem_access_subid.s.esr = 0; /* Endian-swap for Reads. */
1512 mem_access_subid.s.esw = 0; /* Endian-swap for Writes. */
1513 mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1514 mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
1515 mem_access_subid.s.ba = 0; /* PCIe Adddress Bits <63:34>. */
1516 cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
1523 * Wait for posted PCIe read/writes to reach the other side of
1524 * the internal PCIe switch. This will ensure that core
1525 * read/writes are posted before anything after this function
1526 * is called. This may be necessary when writing to memory that
1527 * will later be read using the DMA/PKT engines.
1529 * @param pcie_port PCIe port to wait for
1531 void cvmx_pcie_wait_for_pending(int pcie_port)
1533 if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1535 cvmx_npei_data_out_cnt_t npei_data_out_cnt;
1540 /* See section 9.8, PCIe Core-initiated Requests, in the manual for a
1541 description of how this code works */
1542 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1545 if (!npei_data_out_cnt.s.p1_fcnt)
1547 a = npei_data_out_cnt.s.p1_ucnt;
1548 b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1552 if (!npei_data_out_cnt.s.p0_fcnt)
1554 a = npei_data_out_cnt.s.p0_ucnt;
1555 b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1560 npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
1561 c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
1576 cvmx_sli_data_out_cnt_t sli_data_out_cnt;
1581 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1584 if (!sli_data_out_cnt.s.p1_fcnt)
1586 a = sli_data_out_cnt.s.p1_ucnt;
1587 b = (a + sli_data_out_cnt.s.p1_fcnt-1) & 0xffff;
1591 if (!sli_data_out_cnt.s.p0_fcnt)
1593 a = sli_data_out_cnt.s.p0_ucnt;
1594 b = (a + sli_data_out_cnt.s.p0_fcnt-1) & 0xffff;
1599 sli_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_SLI_DATA_OUT_CNT);
1600 c = (pcie_port) ? sli_data_out_cnt.s.p1_ucnt : sli_data_out_cnt.s.p0_ucnt;