/***********************license start***************
 * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *
 *     * Neither the name of Cavium Networks nor the names of
 *       its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written
 *       permission.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 * For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/
/**
 * @file
 *
 * Functions for accessing memory and CSRs on Octeon when we are compiling
 * natively.
 *
 * <hr>$Revision: 38306 $<hr>
 */
#ifndef __CVMX_ACCESS_NATIVE_H__
#define __CVMX_ACCESS_NATIVE_H__

#ifdef __cplusplus
extern "C" {
#endif
/**
 * Returns the Octeon processor ID.
 *
 * @return Octeon processor ID from COP0
 */
static inline uint32_t cvmx_get_proc_id(void)
{
#ifdef CVMX_BUILD_FOR_LINUX_USER
    extern uint32_t cvmx_app_init_processor_id;
    return cvmx_app_init_processor_id;
#else
    uint32_t id;
    asm ("mfc0 %0, $15,0" : "=r" (id));
    return id;
#endif
}
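
/* A minimal usage sketch (illustrative, not from the SDK docs): the raw
   COP0 PRId value is normally compared through model-check helpers such as
   OCTEON_IS_MODEL() rather than decoded by hand.

       cvmx_dprintf("PRId register: 0x%08x\n", (unsigned int)cvmx_get_proc_id());
*/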
/**
 * Convert a memory pointer (void*) into a hardware compatible
 * memory address (uint64_t). Octeon hardware widgets don't
 * understand logical addresses.
 *
 * @param ptr    C style memory pointer
 * @return Hardware physical address
 */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");

#ifdef CVMX_BUILD_FOR_UBOOT
    /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
    ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
    return(CAST64(ptr) & 0x7FFFFFFF);
#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case where we need to limit this to 30 bits, sign extended
            32 bit. Although these are 64 bits wide, only 30 bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
    {
#ifdef __KERNEL__
        return (long)(ptr) & 0x1fffffff;
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(ptr))
            return CAST64(ptr) - linux_mem32_offset;
        else
            return 0;
#endif
    }
#elif defined(_WRS_KERNEL)
    return (long)(ptr) & 0x7fffffff;
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of RAM. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    uint64_t address = (long)ptr;
    if (address & 0x80000000)
        return address & 0x1fffffff;        /* KSEG pointers directly map the lower 256MB and bootbus */
    else if ((address >= 0x10000000) && (address < 0x20000000))
        return address + 0x400000000ull;    /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
    else
        return address;                     /* Looks to be a 1:1 mapped userspace pointer */
#elif defined(__FreeBSD__) && defined(_KERNEL)
    return (pmap_kextract((vm_offset_t)ptr));
#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assuming we're running the Simple Executive standalone. In this
        mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
        addresses are never used. Since we know all this, save the masking
        cycles and do nothing */
    return CAST64(ptr);
#else
    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case where we need to limit this to 30 bits, sign extended
            32 bit. Although these are 64 bits wide, only 30 bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
        return (long)(ptr) & 0x7fffffff;
#endif
#endif
}
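
/* Usage sketch (hypothetical buffer name): hardware units are always given
   the physical address of a buffer, never the C pointer itself.

       char packet_buffer[1024];
       uint64_t hw_addr = cvmx_ptr_to_phys(packet_buffer);
       (hw_addr can now be handed to a DMA-capable hardware unit)
*/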
/**
 * Convert a hardware physical address (uint64_t) into a
 * memory pointer (void *).
 *
 * @param physical_address
 *               Hardware physical address to memory
 * @return Pointer to memory
 */
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");

#ifdef CVMX_BUILD_FOR_UBOOT
    /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
    ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
    if (physical_address >= 0x80000000)
        return NULL;
    else
        return CASTPTR(void, (physical_address & 0x7FFFFFFF));
#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* Just set the top bit, avoiding any TLB ugliness */
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    }
    else
    {
#ifdef __KERNEL__
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(physical_address))
            return CASTPTR(void, physical_address + linux_mem32_offset);
        else
            return NULL;
#endif
    }
#elif defined(_WRS_KERNEL)
    return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of RAM. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else if ((physical_address >= 0x410000000ull) && (physical_address < 0x420000000ull))
        return CASTPTR(void, physical_address - 0x400000000ull);
    else
        return CASTPTR(void, physical_address);
#elif defined(__FreeBSD__) && defined(_KERNEL)
#if defined(__mips_n64)
    return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
#else
    if (physical_address < 0x20000000)
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else
        panic("%s: mapping high address (%#jx) not yet supported.\n", __func__, (uintmax_t)physical_address);
#endif
#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assuming we're running the Simple Executive standalone. In this
        mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
        addresses are never used. Since we know all this, save bit insert
        cycles and do nothing */
    return CASTPTR(void, physical_address);
#else
    /* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
    if (sizeof(void*) == 8)
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    else
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#endif
#endif
}
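
/* Usage sketch: the inverse translation, for a physical address handed back
   by hardware; "hw_addr" is an illustrative variable name.

       uint64_t hw_addr = ...;          (for example, from a work queue entry)
       void *buf = cvmx_phys_to_ptr(hw_addr);
*/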
/* The following #if controls the definition of the macro
    CVMX_BUILD_WRITE64. This macro is used to build a store operation to
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    *CASTPTR(volatile TYPE##_t, addr) = val;                            \
}
#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual store since a pointer would truncate the address */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
}
#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_WRITE64(TYPE, LT) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple store becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val) \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh = (uint64_t)val>>32;                              \
        uint32_t vall = val;                                            \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
        uint32_t tmp3;                                                  \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[valh], 32\n"                             \
            "dsll   %[tmp2], %[csrh], 32\n"                             \
            "dsll   %[tmp3], %[vall], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp3]\n"                        \
            "dsll   %[tmp3], %[csrl], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp2], %[tmp2], %[tmp3]\n"                        \
            ST "    %[tmp1], 0(%[tmp2])\n"                              \
            ".set pop\n"                                                \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3)\
            : [valh] "r" (valh), [vall] "r" (vall),                     \
              [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[csrh], 32\n"                             \
            "dsll   %[tmp2], %[csrl], 32\n"                             \
            "dsrl   %[tmp2], %[tmp2], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp2]\n"                        \
            ST "    %[val], 0(%[tmp1])\n"                               \
            ".set pop\n"                                                \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2)                  \
            : [val] "r" (val), [csrh] "r" (csr_addrh),                  \
              [csrl] "r" (csr_addrl)                                    \
        );                                                              \
    }                                                                   \
}

#endif

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif
/* The following #if controls the definition of the macro
    CVMX_BUILD_READ64. This macro is used to build a load operation from
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Reading from a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    return *CASTPTR(volatile TYPE##_t, addr);                           \
}
#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual load since a pointer would truncate the address */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    TYPE##_t val;                                                       \
    asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
    return val;                                                         \
}
#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple load becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr)            \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh;                                                  \
        uint32_t vall;                                                  \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[valh], %[csrh], 32\n"                             \
            "dsll   %[vall], %[csrl], 32\n"                             \
            "dsrl   %[vall], %[vall], 32\n"                             \
            "or     %[valh], %[valh], %[vall]\n"                        \
            LT "    %[vall], 0(%[valh])\n"                              \
            "dsrl   %[valh], %[vall], 32\n"                             \
            "sll    %[vall], 0\n"                                       \
            "sll    %[valh], 0\n"                                       \
            ".set pop\n"                                                \
            : [valh] "=&r" (valh), [vall] "=&r" (vall)                  \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return ((uint64_t)valh<<32) | vall;                             \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        TYPE##_t val;                                                   \
        uint32_t tmp;                                                   \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[val], %[csrh], 32\n"                              \
            "dsll   %[tmp], %[csrl], 32\n"                              \
            "dsrl   %[tmp], %[tmp], 32\n"                               \
            "or     %[val], %[val], %[tmp]\n"                           \
            LT "    %[val], 0(%[val])\n"                                \
            ".set pop\n"                                                \
            : [val] "=&r" (val), [tmp] "=&r" (tmp)                      \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return val;                                                     \
    }                                                                   \
}

#endif /* __KERNEL__ */

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif
/* The following defines 8 functions for writing to a 64bit address. Each
    takes two arguments, the address and the value to write.
    cvmx_write64_int64      cvmx_write64_uint64
    cvmx_write64_int32      cvmx_write64_uint32
    cvmx_write64_int16      cvmx_write64_uint16
    cvmx_write64_int8       cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");
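
/* For illustration: given a 64bit hardware address in a variable "addr"
   (hypothetical), each generated function is a single typed store.

       cvmx_write64_uint32(addr, 0x12345678);    uses "sw" on 64bit ABIs
       cvmx_write64_uint8(addr, 0xff);           uses "sb"
*/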
/* The following defines 8 functions for reading from a 64bit address. Each
    takes the address as the only argument.
    cvmx_read64_int64       cvmx_read64_uint64
    cvmx_read64_int32       cvmx_read64_uint32
    cvmx_read64_int16       cvmx_read64_uint16
    cvmx_read64_int8        cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
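
/* For illustration: the unsigned narrow loads use lbu/lhu and therefore
   zero extend, while the signed variants (lb/lh) sign extend.

       uint8_t u = cvmx_read64_uint8(addr);      a stored 0xff reads as 255
       int8_t  s = cvmx_read64_int8(addr);       a stored 0xff reads as -1
*/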
static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
    cvmx_write64_uint64(csr_addr, val);

    /* Perform an immediate read after every write to an RSL register to force
        the write to complete. It doesn't matter what RSL read we do, so we
        choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
    if ((csr_addr >> 40) == 0x800118)
        cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
}
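
/* Usage sketch: the fuse helpers at the end of this file show the typical
   pattern. Because MIO_FUS_RCMD is an RSL register (address bits 63:40 are
   0x800118), the read-back above guarantees the store below has completed
   before cvmx_write_csr() returns.

       cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
*/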
static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
    cvmx_write64_uint64(io_addr, val);
}


static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
    return cvmx_read64_uint64(csr_addr);
}
static inline void cvmx_send_single(uint64_t data)
{
    const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
    cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
}


static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
    union {
        uint64_t u64;
        struct {
            uint64_t    scraddr : 8;
            uint64_t    len     : 8;
            uint64_t    addr    :48;
        } s;
    } addr;

    addr.u64 = csr_addr;
    addr.s.scraddr = scraddr >> 3;
    addr.s.len = 1;
    cvmx_send_single(addr.u64);
}
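
/* Usage sketch: start the asynchronous read, overlap other work, then
   synchronize and pull the result out of the scratchpad. This assumes the
   CVMX_SYNCIOBDMA barrier (cvmx-asm.h) and cvmx_scratch_read64()
   (cvmx-scratch.h) are available; the scratch offset 8 is arbitrary.

       cvmx_read_csr_async(8, CVMX_MIO_BOOT_BIST_STAT);
       (... do unrelated work while the IOBDMA completes ...)
       CVMX_SYNCIOBDMA;
       uint64_t bist = cvmx_scratch_read64(8);
*/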
/**
 * Returns the number of the core on which the program is currently running.
 *
 * @return Number of the core
 */
static inline unsigned int cvmx_get_core_num(void)
{
    unsigned int core_num;
    CVMX_RDHWRNV(core_num, 0);
    return core_num;
}
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @param val    32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline uint32_t cvmx_pop(uint32_t val)
{
    uint32_t pop;
    CVMX_POP(pop, val);
    return pop;
}
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for DPOP instruction.
 *
 * @param val    64 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline int cvmx_dpop(uint64_t val)
{
    int pop;
    CVMX_DPOP(pop, val);
    return pop;
}
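
/* Sanity-check examples for the two wrappers:

       cvmx_pop(0xf0f0f0f0)                  returns 16
       cvmx_dpop(0x0000ffff0000ffffull)      returns 32
*/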
/**
 * Provide current cycle counter as a return value
 *
 * @return current cycle counter
 */
static inline uint64_t cvmx_get_cycle(void)
{
#if defined(CVMX_ABI_O32)
    uint32_t tmp_low, tmp_hi;

    asm volatile (
               "   .set push                    \n"
               "   .set mips64r2                \n"
               "   .set noreorder               \n"
               "   rdhwr %[tmpl], $31           \n"
               "   dsrl  %[tmph], %[tmpl], 32   \n"
               "   sll   %[tmpl], 0             \n"
               "   sll   %[tmph], 0             \n"
               "   .set pop                     \n"
                  : [tmpl] "=&r" (tmp_low), [tmph] "=&r" (tmp_hi) : );

    return(((uint64_t)tmp_hi << 32) + tmp_low);
#else
    uint64_t cycle;
    CVMX_RDHWR(cycle, 31);
    return(cycle);
#endif
}
/**
 * Reads a chip global cycle counter. This counts CPU cycles since
 * chip reset. The counter is 64 bit.
 * This register does not exist on CN38XX pass 1 silicon.
 *
 * @return Global chip cycle count since chip reset.
 */
static inline uint64_t cvmx_get_cycle_global(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
        return 0;
    else
        return cvmx_read64_uint64(CVMX_IPD_CLK_COUNT);
}
/**
 * Wait for the specified number of cycles
 *
 * @param cycles Number of cycles to wait
 */
static inline void cvmx_wait(uint64_t cycles)
{
    uint64_t done = cvmx_get_cycle() + cycles;

    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}
/**
 * Wait for the specified number of microseconds
 *
 * @param usec   microseconds to wait
 */
static inline void cvmx_wait_usec(uint64_t usec)
{
    uint64_t done = cvmx_get_cycle() + usec * cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}
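
/* A polling-with-timeout sketch built from these helpers; device_ready()
   is a hypothetical predicate.

       uint64_t timeout = cvmx_get_cycle() +
                          cvmx_sysinfo_get()->cpu_clock_hz / 100;    (about 10ms)
       while (!device_ready() && (cvmx_get_cycle() < timeout))
           cvmx_wait_usec(100);
*/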
/**
 * Perform a soft reset of Octeon
 */
static inline void cvmx_reset_octeon(void)
{
    cvmx_ciu_soft_rst_t ciu_soft_rst;
    ciu_soft_rst.u64 = 0;
    ciu_soft_rst.s.soft_rst = 1;
    cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}
/**
 * Read a byte of fuse data
 *
 * @param byte_addr   address to read
 *
 * @return A byte of fuse data
 */
static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
{
    cvmx_mio_fus_rcmd_t read_cmd;

    read_cmd.u64 = 0;
    read_cmd.s.addr = byte_addr;
    read_cmd.s.pend = 1;
    cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
    while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
        ;
    return(read_cmd.s.dat);
}
/**
 * Read a single fuse bit
 *
 * @param fuse   Fuse number (0-1023)
 *
 * @return fuse value: 0 or 1
 */
static inline int cvmx_fuse_read(int fuse)
{
    return((cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1);
}
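
/* Worked example of the bit math: fuse number 11 lives in byte 11>>3 = 1,
   bit 11&7 = 3, so cvmx_fuse_read(11) is equivalent to

       (cvmx_fuse_read_byte(1) >> 3) & 1
*/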
#ifdef __cplusplus
}
#endif

#endif /* __CVMX_ACCESS_NATIVE_H__ */