/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Functions for accessing memory and CSRs on Octeon when we are compiling
 * natively.
 *
 * <hr>$Revision: 38306 $<hr>
*/
#ifndef __CVMX_ACCESS_NATIVE_H__
#define __CVMX_ACCESS_NATIVE_H__

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Returns the Octeon processor ID.
 *
 * @return Octeon processor ID from COP0
 */
static inline uint32_t cvmx_get_proc_id(void)
{
#ifdef CVMX_BUILD_FOR_LINUX_USER
    extern uint32_t cvmx_app_init_processor_id;
    return cvmx_app_init_processor_id;
#else
    uint32_t id;
    asm ("mfc0 %0, $15,0" : "=r" (id));
    return id;
#endif
}
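
/* Usage sketch (illustrative, not part of the SDK API): the PrId value read
   above encodes the chip family and revision; per the standard MIPS COP0
   PrId layout the low 8 bits hold the revision. cvmx_example_proc_revision()
   is a hypothetical helper name. */
static inline uint32_t cvmx_example_proc_revision(void)
{
    return cvmx_get_proc_id() & 0xff; /* revision field only */
}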

/**
 * Convert a memory pointer (void*) into a hardware compatible
 * memory address (uint64_t). Octeon hardware widgets don't
 * understand logical addresses.
 *
 * @param ptr    C style memory pointer
 * @return Hardware physical address
 */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");

#ifdef CVMX_BUILD_FOR_UBOOT
    uint64_t uboot_tlb_ptr_to_phys(void *ptr);

    if (((uint32_t)ptr) < 0x80000000)
    {
        /* Handle useg (unmapped due to ERL) here */
        return(CAST64(ptr) & 0x7FFFFFFF);
    }
    else if (((uint32_t)ptr) < 0xC0000000)
    {
        /* Here we handle KSEG0/KSEG1 _pointers_. We know we are dealing
        ** with 32 bit only values, so we treat them that way. Note that
        ** a cvmx_phys_to_ptr(cvmx_ptr_to_phys(X)) will not return X in this
        ** case, but the physical address of the KSEG0/KSEG1 address. */
        return(CAST64(ptr) & 0x1FFFFFFF);
    }
    else
        return(uboot_tlb_ptr_to_phys(ptr)); /* Should not get here in the !TLB case */

#elif defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CVMX_BUILD_FOR_LINUX_USER)
    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
           40 bits of address space (the hardware limit). Unfortunately there
           is one case where we need to limit this to 30 bits, sign extended
           32 bit. Although these are 64 bits wide, only 30 bits can be used. */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
    {
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        return (long)(ptr) & 0x1fffffff;
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(ptr))
            return CAST64(ptr) - linux_mem32_offset;
        else
            return 0;
#endif
    }

#elif defined(_WRS_KERNEL)
    return (long)(ptr) & 0x7fffffff;

#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
       2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    uint64_t address = (long)ptr;
    if (address & 0x80000000)
        return address & 0x1fffffff; /* KSEG pointers directly map the lower 256MB and bootbus */
    else if ((address >= 0x10000000) && (address < 0x20000000))
        return address + 0x400000000ull; /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
    else
        return address; /* Looks to be a 1:1 mapped userspace pointer */

#elif defined(__FreeBSD__) && defined(_KERNEL)
    return (pmap_kextract((vm_offset_t)ptr));

#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assuming we're running the Simple Executive standalone. In this
       mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
       addresses are never used. Since we know all this, save the masking
       cycles and do nothing. */
    return CAST64(ptr);
#else
    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
           40 bits of address space (the hardware limit). Unfortunately there
           is one case where we need to limit this to 30 bits, sign extended
           32 bit. Although these are 64 bits wide, only 30 bits can be used. */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
        return (long)(ptr) & 0x7fffffff;
#endif
#endif
}

/**
 * Convert a hardware physical address (uint64_t) into a
 * memory pointer (void *).
 *
 * @param physical_address
 *               Hardware physical address to memory
 * @return Pointer to memory
 */
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");

#ifdef CVMX_BUILD_FOR_UBOOT
#if !CONFIG_OCTEON_UBOOT_TLB
    if (physical_address >= 0x80000000)
        return NULL;
    else
        return CASTPTR(void, (physical_address & 0x7FFFFFFF));
#endif

    /* U-boot is a special case, as it is running in 32 bit mode, using the TLB to map code/data
    ** which can have a physical address above the 32 bit address space. 1-1 mappings are used
    ** to allow the low 2 GBytes to be accessed while in error level.
    **
    ** NOTE: This conversion can cause problems in u-boot, as users may want to enter addresses
    ** like 0xBFC00000 (kseg1 boot bus address), which is a valid 64 bit physical address,
    ** but is likely intended to be a boot bus address. */

    if (physical_address < 0x80000000)
    {
        /* Handle useg here. ERL is set, so useg is unmapped. This is the only physical
        ** address range that is directly addressable by u-boot. */
        return CASTPTR(void, physical_address);
    }
    else
    {
        DECLARE_GLOBAL_DATA_PTR;
        extern char uboot_start;
        /* Above 0x80000000 we can only support one case - a physical address
        ** that is mapped for u-boot code/data. We check against the u-boot mem range,
        ** and return NULL if it is out of this range.
        */
        if (physical_address >= gd->bd->bi_uboot_ram_addr
            && physical_address < gd->bd->bi_uboot_ram_addr + gd->bd->bi_uboot_ram_used_size)
            return ((char *)&uboot_start + (physical_address - gd->bd->bi_uboot_ram_addr));
        else
            return NULL;
    }

#elif defined(CVMX_BUILD_FOR_LINUX_KERNEL) || defined(CVMX_BUILD_FOR_LINUX_USER)
    if (sizeof(void*) == 8)
    {
        /* Just set the top bit, avoiding any TLB ugliness */
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    }
    else
    {
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        /* A 32 bit kernel can only directly address the low 2GB through KSEG0 */
        if (physical_address >= 0x80000000)
            return NULL;
        else
            return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(physical_address))
            return CASTPTR(void, physical_address + linux_mem32_offset);
        else
            return NULL;
#endif
    }

#elif defined(_WRS_KERNEL)
    return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));

#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
       2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && (physical_address >= 0x410000000ull) &&
             (physical_address < 0x420000000ull))
        return CASTPTR(void, physical_address - 0x400000000ull);
    else
        return CASTPTR(void, physical_address);

#elif defined(__FreeBSD__) && defined(_KERNEL)
#if defined(__mips_n64)
    return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
#else
    if (physical_address < 0x20000000)
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else
        panic("%s: mapping high address (%#jx) not yet supported.\n", __func__, (uintmax_t)physical_address);
#endif

#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assuming we're running the Simple Executive standalone. In this
       mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
       addresses are never used. Since we know all this, save bit insert
       cycles and do nothing. */
    return CASTPTR(void, physical_address);
#else
    /* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
    if (sizeof(void*) == 8)
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    else
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#endif
#endif
}
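
/* Usage sketch (illustrative, not part of the SDK API): hand a buffer to a
   hardware unit by physical address, then map a completion address back to
   a pointer. Remember the KSEG0/KSEG1 caveat above: the round trip yields a
   pointer to the same physical memory, not necessarily the original pointer
   bits. cvmx_example_round_trip() is a hypothetical helper name. */
static inline void *cvmx_example_round_trip(void *buffer)
{
    uint64_t phys = cvmx_ptr_to_phys(buffer); /* what the hardware sees */
    return cvmx_phys_to_ptr(phys);            /* back to a CPU-usable pointer */
}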

/* The following #if controls the definition of the macro
    CVMX_BUILD_WRITE64. This macro is used to build a store operation to
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    *CASTPTR(volatile TYPE##_t, addr) = val;                            \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual store since a pointer would truncate the address */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_WRITE64(TYPE, ST) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple store becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                        \
static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val)     \
{                                                                           \
    if (sizeof(TYPE##_t) == 8)                                              \
    {                                                                       \
        uint32_t csr_addrh = csr_addr>>32;                                  \
        uint32_t csr_addrl = csr_addr;                                      \
        uint32_t valh = (uint64_t)val>>32;                                  \
        uint32_t vall = val;                                                \
        uint32_t tmp1;                                                      \
        uint32_t tmp2;                                                      \
        uint32_t tmp3;                                                      \
                                                                            \
        asm volatile (                                                      \
            ".set push\n"                                                   \
            ".set mips64\n"                                                 \
            "dsll   %[tmp1], %[valh], 32\n"                                 \
            "dsll   %[tmp2], %[csrh], 32\n"                                 \
            "dsll   %[tmp3], %[vall], 32\n"                                 \
            "dsrl   %[tmp3], %[tmp3], 32\n"                                 \
            "or     %[tmp1], %[tmp1], %[tmp3]\n"                            \
            "dsll   %[tmp3], %[csrl], 32\n"                                 \
            "dsrl   %[tmp3], %[tmp3], 32\n"                                 \
            "or     %[tmp2], %[tmp2], %[tmp3]\n"                            \
            ST "    %[tmp1], 0(%[tmp2])\n"                                  \
            ".set pop\n"                                                    \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3) \
            : [valh] "r" (valh), [vall] "r" (vall),                         \
              [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)                \
        );                                                                  \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        uint32_t csr_addrh = csr_addr>>32;                                  \
        uint32_t csr_addrl = csr_addr;                                      \
        uint32_t tmp1;                                                      \
        uint32_t tmp2;                                                      \
                                                                            \
        asm volatile (                                                      \
            ".set push\n"                                                   \
            ".set mips64\n"                                                 \
            "dsll   %[tmp1], %[csrh], 32\n"                                 \
            "dsll   %[tmp2], %[csrl], 32\n"                                 \
            "dsrl   %[tmp2], %[tmp2], 32\n"                                 \
            "or     %[tmp1], %[tmp1], %[tmp2]\n"                            \
            ST "    %[val], 0(%[tmp1])\n"                                   \
            ".set pop\n"                                                    \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2)                      \
            : [val] "r" (val), [csrh] "r" (csr_addrh),                      \
              [csrl] "r" (csr_addrl)                                        \
        );                                                                  \
    }                                                                       \
}

#endif /* __KERNEL__ */

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif
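
/* Illustration (hypothetical helper, not in the SDK): the address rebuild the
   O32 macro performs in assembly, expressed in plain C. The dsll/dsrl pair
   zero-extends the low word and the final or splices the halves back into a
   single 64 bit register before the store or load. */
static inline uint64_t cvmx_example_o32_rebuild(uint32_t csr_addrh, uint32_t csr_addrl)
{
    uint64_t high = (uint64_t)csr_addrh << 32;        /* dsll %[csrh], 32 */
    uint64_t low = ((uint64_t)csr_addrl << 32) >> 32; /* dsll then dsrl: zero-extend */
    return high | low;                                /* or: full 64 bit address */
}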

/* The following #if controls the definition of the macro
    CVMX_BUILD_READ64. This macro is used to build a load operation from
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Reading from a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    return *CASTPTR(volatile TYPE##_t, addr);                           \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual load since a pointer would truncate the address */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    TYPE##_t val;                                                       \
    asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
    return val;                                                         \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple load becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr)            \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh;                                                  \
        uint32_t vall;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[valh], %[csrh], 32\n"                             \
            "dsll   %[vall], %[csrl], 32\n"                             \
            "dsrl   %[vall], %[vall], 32\n"                             \
            "or     %[valh], %[valh], %[vall]\n"                        \
            LT "    %[vall], 0(%[valh])\n"                              \
            "dsrl   %[valh], %[vall], 32\n"                             \
            "sll    %[vall], %[vall], 0\n"                              \
            ".set pop\n"                                                \
            : [valh] "=&r" (valh), [vall] "=&r" (vall)                  \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return ((uint64_t)valh<<32) | vall;                             \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        TYPE##_t val;                                                   \
        uint32_t tmp;                                                   \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[val], %[csrh], 32\n"                              \
            "dsll   %[tmp], %[csrl], 32\n"                              \
            "dsrl   %[tmp], %[tmp], 32\n"                               \
            "or     %[val], %[val], %[tmp]\n"                           \
            LT "    %[val], 0(%[val])\n"                                \
            ".set pop\n"                                                \
            : [val] "=&r" (val), [tmp] "=&r" (tmp)                      \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return val;                                                     \
    }                                                                   \
}

#endif /* __KERNEL__ */

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif

/* The following defines 8 functions for writing to a 64bit address. Each
    takes two arguments, the address and the value to write.
    cvmx_write64_int64      cvmx_write64_uint64
    cvmx_write64_int32      cvmx_write64_uint32
    cvmx_write64_int16      cvmx_write64_uint16
    cvmx_write64_int8       cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");

/* The following defines 8 functions for reading from a 64bit address. Each
    takes the address as the only argument
    cvmx_read64_int64       cvmx_read64_uint64
    cvmx_read64_int32       cvmx_read64_uint32
    cvmx_read64_int16       cvmx_read64_uint16
    cvmx_read64_int8        cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
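
/* Usage sketch (illustrative): the generated accessors take the full 64 bit
   hardware address on every ABI. The address below is made up for the
   example, not a real Octeon CSR. */
static inline uint32_t cvmx_example_raw_access(void)
{
    uint64_t addr = 0x8001070000000600ull; /* hypothetical device register */
    cvmx_write64_uint32(addr, 0x12345678); /* 32 bit store to a 64 bit address */
    return cvmx_read64_uint32(addr);       /* 32 bit load from the same address */
}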

static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
    cvmx_write64_uint64(csr_addr, val);

    /* Perform an immediate read after every write to an RSL register to force
       the write to complete. It doesn't matter what RSL read we do, so we
       choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
    if (((csr_addr >> 40) & 0x7ffff) == (0x118))
        cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
}

static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
    cvmx_write64_uint64(io_addr, val);
}

static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
    return cvmx_read64_uint64(csr_addr);
}
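
/* Usage sketch (illustrative): a typical read-modify-write through the CSR
   helpers above. cvmx_example_csr_set_bit() is a hypothetical helper name;
   for RSL registers the flushing read in cvmx_write_csr() happens
   automatically. */
static inline uint64_t cvmx_example_csr_set_bit(uint64_t csr_addr, int bit)
{
    uint64_t v = cvmx_read_csr(csr_addr); /* current value */
    v |= 1ull << bit;                     /* set the requested bit */
    cvmx_write_csr(csr_addr, v);          /* write it back */
    return v;
}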

static inline void cvmx_send_single(uint64_t data)
{
    const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
    cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
}

static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
    union
    {
        uint64_t u64;
        struct
        {
            uint64_t scraddr : 8;
            uint64_t len     : 8;
            uint64_t addr    :48;
        } s;
    } addr;

    addr.u64 = csr_addr;
    addr.s.scraddr = scraddr >> 3;
    addr.s.len = 1;
    cvmx_send_single(addr.u64);
}
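
/* Usage sketch (illustrative): an asynchronous CSR read lands in the core's
   scratchpad; the caller must order the IOBDMA with CVMX_SYNCIOBDMA and then
   fetch the value, e.g. with cvmx_scratch_read64() from cvmx-scratch.h. This
   pairing is a sketch under those assumptions, not a drop-in routine. */
static inline uint64_t cvmx_example_async_csr_read(uint64_t csr_addr)
{
    const uint64_t scr = 0;             /* scratchpad byte offset for the result */
    cvmx_read_csr_async(scr, csr_addr); /* queue the read */
    /* ...independent work could overlap with the IOBDMA here... */
    CVMX_SYNCIOBDMA;                    /* wait for the result to land */
    return cvmx_scratch_read64(scr);
}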

/**
 * Returns the number of the core on which the program is currently running.
 *
 * @return The current core's number
 */
static inline unsigned int cvmx_get_core_num(void)
{
    unsigned int core_num;
    CVMX_RDHWRNV(core_num, 0);
    return core_num;
}
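
/* Usage sketch (illustrative): gate one-time setup to a single core in a
   multi-core application. cvmx_example_is_init_core() is a hypothetical
   helper name. */
static inline int cvmx_example_is_init_core(void)
{
    return cvmx_get_core_num() == 0; /* run one-time setup on core 0 only */
}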

/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for the POP instruction.
 *
 * @param val    32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline uint32_t cvmx_pop(uint32_t val)
{
    uint32_t pop;
    CVMX_POP(pop, val);
    return pop;
}

/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for the DPOP instruction.
 *
 * @param val    64 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline int cvmx_dpop(uint64_t val)
{
    int pop;
    CVMX_DPOP(pop, val);
    return pop;
}
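
/* Usage sketch (illustrative): population counts turn a coremask into a core
   count. The mask below is made up for the example. */
static inline uint32_t cvmx_example_cores_in_mask(void)
{
    uint32_t coremask = 0x0000000f; /* hypothetical 4-core mask */
    return cvmx_pop(coremask);      /* == 4 */
}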

/**
 * Provide the current cycle counter as a return value. Deprecated; use
 * cvmx_clock_get_count(CVMX_CLOCK_CORE) to get the cycle counter.
 *
 * @return current cycle counter
 */
static inline uint64_t cvmx_get_cycle(void)
{
    return cvmx_clock_get_count(CVMX_CLOCK_CORE);
}

/**
 * Reads a chip global cycle counter. This counts SCLK cycles since
 * chip reset. The counter is 64 bit. This function is deprecated as the rate
 * of the global cycle counter differs between Octeon+ and Octeon2; use
 * cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
 * of SCLK may be different from the core clock.
 *
 * @return Global chip cycle count since chip reset.
 */
static inline uint64_t cvmx_get_cycle_global(void)
{
    return cvmx_clock_get_count(CVMX_CLOCK_IPD);
}

/**
 * Wait for the specified number of core clock cycles
 *
 * @param cycles Number of core clock cycles to wait
 */
static inline void cvmx_wait(uint64_t cycles)
{
    uint64_t done = cvmx_get_cycle() + cycles;

    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}

/**
 * Wait for the specified number of microseconds
 *
 * @param usec   microseconds to wait
 */
static inline void cvmx_wait_usec(uint64_t usec)
{
    uint64_t done = cvmx_get_cycle() + usec * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}

/**
 * Wait for the specified number of io clock cycles
 *
 * @param cycles Number of io clock cycles to wait
 */
static inline void cvmx_wait_io(uint64_t cycles)
{
    uint64_t done = cvmx_clock_get_count(CVMX_CLOCK_SCLK) + cycles;

    while (cvmx_clock_get_count(CVMX_CLOCK_SCLK) < done)
    {
        /* Spin */
    }
}
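
/* Usage sketch (illustrative): delays quoted in time rather than cycles go
   through cvmx_wait_usec(), which converts using the core clock rate, so the
   same code works at any frequency. */
static inline void cvmx_example_settle_delay(void)
{
    cvmx_wait_usec(100); /* 100 microseconds regardless of core clock */
}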

/**
 * Perform a soft reset of Octeon
 */
static inline void cvmx_reset_octeon(void)
{
    cvmx_ciu_soft_rst_t ciu_soft_rst;
    ciu_soft_rst.u64 = 0;
    ciu_soft_rst.s.soft_rst = 1;
    cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}

/**
 * Read a byte of fuse data
 *
 * @param byte_addr   address to read
 *
 * @return byte of fuse data
 */
static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
{
    cvmx_mio_fus_rcmd_t read_cmd;

    read_cmd.u64 = 0;
    read_cmd.s.addr = byte_addr;
    read_cmd.s.pend = 1;
    cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
    while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
        ;
    return(read_cmd.s.dat);
}

/**
 * Read a single fuse bit
 *
 * @param fuse   Fuse number (0-1024)
 *
 * @return fuse value: 0 or 1
 */
static inline int cvmx_fuse_read(int fuse)
{
    return((cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1);
}
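
/* Usage sketch (illustrative): fuses are addressed by bit number, so reading
   bit 42 fetches fuse byte 5 and extracts bit 2. The fuse number is made up
   for the example. */
static inline int cvmx_example_fuse_bit(void)
{
    return cvmx_fuse_read(42); /* same as (cvmx_fuse_read_byte(5) >> 2) & 1 */
}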

#ifdef __cplusplus
}
#endif

#endif /* __CVMX_ACCESS_NATIVE_H__ */