/***********************license start***************
 *  Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
 *  reserved.
 *
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are
 *  met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *
 *      * Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials provided
 *        with the distribution.
 *
 *      * Neither the name of Cavium Networks nor the names of
 *        its contributors may be used to endorse or promote products
 *        derived from this software without specific prior written
 *        permission.
 *
 *  TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 *  AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 *  OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 *  RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 *  REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 *  DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 *  OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 *  PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 *  POSSESSION OR CORRESPONDENCE TO DESCRIPTION.  THE ENTIRE RISK ARISING OUT
 *  OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 *  For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/

/**
 * @file
 * Functions for accessing memory and CSRs on Octeon when we are compiling
 * natively.
 *
 * <hr>$Revision: 38306 $<hr>
*/
#ifndef __CVMX_ACCESS_NATIVE_H__
#define __CVMX_ACCESS_NATIVE_H__

#ifdef  __cplusplus
extern "C" {
#endif

/**
 * Returns the Octeon processor ID.
 *
 * @return Octeon processor ID from COP0
 */
static inline uint32_t cvmx_get_proc_id(void)
{
#ifdef CVMX_BUILD_FOR_LINUX_USER
    extern uint32_t cvmx_app_init_processor_id;
    return cvmx_app_init_processor_id;
#else
    uint32_t id;
    asm ("mfc0 %0, $15,0" : "=r" (id));
    return id;
#endif
}

/**
 * Convert a memory pointer (void*) into a hardware compatible
 * memory address (uint64_t). Octeon hardware widgets don't
 * understand logical addresses.
 *
 * @param ptr    C style memory pointer
 * @return Hardware physical address
 */
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");

#ifdef CVMX_BUILD_FOR_UBOOT
    /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
    ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
    return(CAST64(ptr) & 0x7FFFFFFF);
#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case where we need to limit this to 30 bits: sign extended
            32 bit addresses. Although such pointers are 64 bits wide, only 30
            bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
    {
#ifdef __KERNEL__
        return (long)(ptr) & 0x1fffffff;
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(ptr))
            return CAST64(ptr) - linux_mem32_offset;
        else
            return 0;
#endif
    }
#elif defined(_WRS_KERNEL)
        return (long)(ptr) & 0x7fffffff;
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of RAM. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    uint64_t address = (long)ptr;
    if (address & 0x80000000)
        return address & 0x1fffffff;    /* KSEG pointers directly map the lower 256MB and bootbus */
    else if ((address >= 0x10000000) && (address < 0x20000000))
        return address + 0x400000000ull;   /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
    else
        return address; /* Looks to be a 1:1 mapped userspace pointer */
#elif defined(__FreeBSD__) && defined(_KERNEL)
    return (pmap_kextract((vm_offset_t)ptr));
#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* We are assuming we're running the Simple Executive standalone. In this
        mode the TLB is set up to perform 1:1 mapping and 32 bit sign extended
        addresses are never used. Since we know all this, save the masking
        cycles and do nothing */
    return CAST64(ptr);
#else

    if (sizeof(void*) == 8)
    {
        /* We're running in 64 bit mode. Normally this means that we can use
            40 bits of address space (the hardware limit). Unfortunately there
            is one case where we need to limit this to 30 bits: sign extended
            32 bit addresses. Although such pointers are 64 bits wide, only 30
            bits can be used */
        if ((CAST64(ptr) >> 62) == 3)
            return CAST64(ptr) & cvmx_build_mask(30);
        else
            return CAST64(ptr) & cvmx_build_mask(40);
    }
    else
        return (long)(ptr) & 0x7fffffff;

#endif
#endif
}
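
/* Illustrative usage (a sketch, not part of the original header). The buffer
   and its source are hypothetical; only cvmx_ptr_to_phys() above is assumed.

       void *buffer = get_packet_buffer();          // hypothetical allocation
       uint64_t phys = cvmx_ptr_to_phys(buffer);    // address a hardware unit can use
       // 'phys' would then be placed in a hardware descriptor or written to a CSR
*/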


/**
 * Convert a hardware physical address (uint64_t) into a
 * memory pointer (void *).
 *
 * @param physical_address
 *               Hardware physical address to memory
 * @return Pointer to memory
 */
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
        cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");

#ifdef CVMX_BUILD_FOR_UBOOT
    /* U-boot is a special case, as it is running in error level, which disables the TLB completely.
    ** U-boot may use kseg0 addresses, or may directly use physical addresses already */
    if (physical_address >= 0x80000000)
        return NULL;
    else
        return CASTPTR(void, (physical_address & 0x7FFFFFFF));
#endif

#ifdef __linux__
    if (sizeof(void*) == 8)
    {
        /* Just set the top bit, avoiding any TLB ugliness */
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    }
    else
    {
#ifdef __KERNEL__
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#else
        extern uint64_t linux_mem32_offset;
        if (cvmx_likely(physical_address))
            return CASTPTR(void, physical_address + linux_mem32_offset);
        else
            return NULL;
#endif
    }
#elif defined(_WRS_KERNEL)
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#elif defined(VXWORKS_USER_MAPPINGS)
    /* This mapping mode is used in vxWorks 5.5 to support 2GB of RAM. The
        2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
    if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else if ((physical_address >= 0x410000000ull) && (physical_address < 0x420000000ull))
        return CASTPTR(void, physical_address - 0x400000000ull);
    else
        return CASTPTR(void, physical_address);
#elif defined(__FreeBSD__) && defined(_KERNEL)
#if defined(__mips_n64)
    return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
#else
    if (physical_address < 0x20000000)
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
    else
        panic("%s: mapping high address (%#jx) not yet supported.\n", __func__, (uintmax_t)physical_address);
#endif
#else

#if CVMX_USE_1_TO_1_TLB_MAPPINGS
        /* We are assuming we're running the Simple Executive standalone. In this
            mode the TLB is set up to perform 1:1 mapping and 32 bit sign extended
            addresses are never used. Since we know all this, save bit insert
            cycles and do nothing */
    return CASTPTR(void, physical_address);
#else
    /* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
    if (sizeof(void*) == 8)
        return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
    else
        return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));

#endif

#endif
}
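
/* Illustrative usage (a sketch, not part of the original header). For addresses
   backed by real memory the two conversions are intended to be inverses, so a
   round trip should return the original pointer. The pointer is hypothetical.

       void *ptr = some_pointer_into_ram;                            // hypothetical
       uint64_t phys = cvmx_ptr_to_phys(ptr);
       cvmx_warn_if(cvmx_phys_to_ptr(phys) != ptr, "round trip mismatch\n");
*/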


/* The following #if controls the definition of the macro
    CVMX_BUILD_WRITE64. This macro is used to build a store operation to
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Writing to a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    *CASTPTR(volatile TYPE##_t, addr) = val;                            \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual store since a pointer would truncate the address */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val)     \
{                                                                       \
    asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_WRITE64(TYPE, LT) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple store becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_WRITE64(TYPE, ST)                                    \
static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val) \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh = (uint64_t)val>>32;                              \
        uint32_t vall = val;                                            \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
        uint32_t tmp3;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[valh], 32\n"                             \
            "dsll   %[tmp2], %[csrh], 32\n"                             \
            "dsll   %[tmp3], %[vall], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp3]\n"                        \
            "dsll   %[tmp3], %[csrl], 32\n"                             \
            "dsrl   %[tmp3], %[tmp3], 32\n"                             \
            "or     %[tmp2], %[tmp2], %[tmp3]\n"                        \
            ST "    %[tmp1], 0(%[tmp2])\n"                              \
            ".set pop\n"                                                \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3)\
            : [valh] "r" (valh), [vall] "r" (vall),                     \
              [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t tmp1;                                                  \
        uint32_t tmp2;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[tmp1], %[csrh], 32\n"                             \
            "dsll   %[tmp2], %[csrl], 32\n"                             \
            "dsrl   %[tmp2], %[tmp2], 32\n"                             \
            "or     %[tmp1], %[tmp1], %[tmp2]\n"                        \
            ST "    %[val], 0(%[tmp1])\n"                               \
            ".set pop\n"                                                \
            : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2)                  \
            : [val] "r" (val), [csrh] "r" (csr_addrh),                  \
              [csrl] "r" (csr_addrl)                                    \
        );                                                              \
    }                                                                   \
}

#endif

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif

/* The following #if controls the definition of the macro
    CVMX_BUILD_READ64. This macro is used to build a load operation from
    a full 64bit address. With a 64bit ABI, this can be done with a simple
    pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)

/* We have a full 64bit ABI. Reading from a 64bit address can be done with
    a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    return *CASTPTR(volatile TYPE##_t, addr);                           \
}

#elif defined(CVMX_ABI_N32)

/* The N32 ABI passes all 64bit quantities in a single register, so it is
    possible to use the arguments directly. We have to use inline assembly
    for the actual load since a pointer would truncate the address */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr)                \
{                                                                       \
    TYPE##_t val;                                                       \
    asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
    return val;                                                         \
}

#elif defined(CVMX_ABI_O32)

#ifdef __KERNEL__
#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
#else

/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
    separate registers. Assembly must be used to put them back together
    before they're used. What should be a simple load becomes a
    convoluted mess of shifts and ors */
#define CVMX_BUILD_READ64(TYPE, LT)                                     \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr)            \
{                                                                       \
    if (sizeof(TYPE##_t) == 8)                                          \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        uint32_t valh;                                                  \
        uint32_t vall;                                                  \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[valh], %[csrh], 32\n"                             \
            "dsll   %[vall], %[csrl], 32\n"                             \
            "dsrl   %[vall], %[vall], 32\n"                             \
            "or     %[valh], %[valh], %[vall]\n"                        \
            LT "    %[vall], 0(%[valh])\n"                              \
            "dsrl   %[valh], %[vall], 32\n"                             \
            "sll    %[vall], 0\n"                                       \
            "sll    %[valh], 0\n"                                       \
            ".set pop\n"                                                \
            : [valh] "=&r" (valh), [vall] "=&r" (vall)                  \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return ((uint64_t)valh<<32) | vall;                             \
    }                                                                   \
    else                                                                \
    {                                                                   \
        uint32_t csr_addrh = csr_addr>>32;                              \
        uint32_t csr_addrl = csr_addr;                                  \
        TYPE##_t val;                                                   \
        uint32_t tmp;                                                   \
                                                                        \
        asm volatile (                                                  \
            ".set push\n"                                               \
            ".set mips64\n"                                             \
            "dsll   %[val], %[csrh], 32\n"                              \
            "dsll   %[tmp], %[csrl], 32\n"                              \
            "dsrl   %[tmp], %[tmp], 32\n"                               \
            "or     %[val], %[val], %[tmp]\n"                           \
            LT "    %[val], 0(%[val])\n"                                \
            ".set pop\n"                                                \
            : [val] "=&r" (val), [tmp] "=&r" (tmp)                      \
            : [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl)            \
        );                                                              \
        return val;                                                     \
    }                                                                   \
}

#endif /* __KERNEL__ */

#else

/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI

#endif

/* The following defines 8 functions for writing to a 64bit address. Each
    takes two arguments, the address and the value to write.
    cvmx_write64_int64      cvmx_write64_uint64
    cvmx_write64_int32      cvmx_write64_uint32
    cvmx_write64_int16      cvmx_write64_uint16
    cvmx_write64_int8       cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");
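
/* Illustrative usage (a sketch, not part of the original header). All eight
   writers share the same calling convention regardless of ABI; the address
   below is purely hypothetical.

       uint64_t dest = SOME_64BIT_HARDWARE_ADDRESS;   // hypothetical
       cvmx_write64_uint32(dest, 0x12345678);         // 32 bit store
       cvmx_write64_uint8(dest + 4, 0xff);            // 8 bit store
*/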

/* The following defines 8 functions for reading from a 64bit address. Each
    takes the address as the only argument
    cvmx_read64_int64       cvmx_read64_uint64
    cvmx_read64_int32       cvmx_read64_uint32
    cvmx_read64_int16       cvmx_read64_uint16
    cvmx_read64_int8        cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
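
/* Illustrative usage (a sketch, not part of the original header). The readers
   mirror the writers above; the narrower variants sign or zero extend
   according to the load instruction they wrap. The address is hypothetical.

       uint64_t src = SOME_64BIT_HARDWARE_ADDRESS;    // hypothetical
       uint64_t wide = cvmx_read64_uint64(src);       // full 64 bit load
       uint8_t  byte = cvmx_read64_uint8(src);        // zero extended 8 bit load
*/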

static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
    cvmx_write64_uint64(csr_addr, val);

    /* Perform an immediate read after every write to an RSL register to force
        the write to complete. It doesn't matter what RSL read we do, so we
        choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
    if ((csr_addr >> 40) == (0x800118))
        cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
}
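
/* Illustrative usage (a sketch, not part of the original header). A CSR write
   followed by a read back of the same register; the register address and value
   are hypothetical placeholders.

       cvmx_write_csr(SOME_CSR_ADDRESS, new_value);
       uint64_t current = cvmx_read_csr(SOME_CSR_ADDRESS);
*/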

static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
    cvmx_write64_uint64(io_addr, val);
}

static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
    return cvmx_read64_uint64(csr_addr);
}

static inline void cvmx_send_single(uint64_t data)
{
    const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
    cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
}

static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
    union
    {
        uint64_t    u64;
        struct {
            uint64_t    scraddr : 8;
            uint64_t    len     : 8;
            uint64_t    addr    :48;
        } s;
    } addr;
    addr.u64 = csr_addr;
    addr.s.scraddr = scraddr >> 3;
    addr.s.len = 1;
    cvmx_send_single(addr.u64);
}
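
/* Illustrative usage (a sketch, not part of the original header). An
   asynchronous CSR read deposits the result in the core's scratchpad memory;
   the caller synchronizes later and picks the value up. CVMX_SYNCIOBDMA and
   cvmx_scratch_read64() are assumed to be provided by other SDK headers; the
   scratch offset and register address are hypothetical.

       cvmx_read_csr_async(0, SOME_CSR_ADDRESS);   // start the read into scratch offset 0
       // ... overlap other work while the IOBDMA completes ...
       CVMX_SYNCIOBDMA;                            // wait for outstanding IOBDMA operations
       uint64_t value = cvmx_scratch_read64(0);    // fetch the completed result
*/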


/**
 * Number of the core on which the program is currently running.
 *
 * @return Core number
 */
static inline unsigned int cvmx_get_core_num(void)
{
    unsigned int core_num;
    CVMX_RDHWRNV(core_num, 0);
    return core_num;
}


/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @param val    32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline uint32_t cvmx_pop(uint32_t val)
{
    uint32_t pop;
    CVMX_POP(pop, val);
    return pop;
}


/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for DPOP instruction.
 *
 * @param val    64 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline int cvmx_dpop(uint64_t val)
{
    int pop;
    CVMX_DPOP(pop, val);
    return pop;
}


/**
 * Returns the current cycle counter.
 *
 * @return current cycle counter
 */
static inline uint64_t cvmx_get_cycle(void)
{
#if defined(CVMX_ABI_O32)
    uint32_t tmp_low, tmp_hi;

    asm volatile (
               "   .set push                    \n"
               "   .set mips64r2                \n"
               "   .set noreorder               \n"
               "   rdhwr %[tmpl], $31           \n"
               "   dsrl  %[tmph], %[tmpl], 32   \n"
               "   sll   %[tmpl], 0             \n"
               "   sll   %[tmph], 0             \n"
               "   .set pop                     \n"
                  : [tmpl] "=&r" (tmp_low), [tmph] "=&r" (tmp_hi) : );

    return(((uint64_t)tmp_hi << 32) + tmp_low);
#else
    uint64_t cycle;
    CVMX_RDHWR(cycle, 31);
    return(cycle);
#endif
}


/**
 * Reads the chip global cycle counter.  This counts CPU cycles since
 * chip reset.  The counter is 64 bit.
 * This register does not exist on CN38XX pass 1 silicon.
 *
 * @return Global chip cycle count since chip reset.
 */
static inline uint64_t cvmx_get_cycle_global(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
        return 0;
    else
        return cvmx_read64_uint64(CVMX_IPD_CLK_COUNT);
}


/**
 * Wait for the specified number of cycles.
 *
 * @param cycles Number of cycles to wait
 */
static inline void cvmx_wait(uint64_t cycles)
{
    uint64_t done = cvmx_get_cycle() + cycles;

    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}


/**
 * Wait for the specified number of microseconds.
 *
 * @param usec   microseconds to wait
 */
static inline void cvmx_wait_usec(uint64_t usec)
{
    uint64_t done = cvmx_get_cycle() + usec * cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
    while (cvmx_get_cycle() < done)
    {
        /* Spin */
    }
}
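
/* Illustrative usage (a sketch, not part of the original header). Both waits
   spin on the cycle counter and never sleep, so they are only suitable for
   short delays, e.g. letting a device come out of reset:

       cvmx_wait_usec(10000);   // busy wait roughly 10 ms
*/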


/**
 * Perform a soft reset of Octeon.
 */
static inline void cvmx_reset_octeon(void)
{
    cvmx_ciu_soft_rst_t ciu_soft_rst;
    ciu_soft_rst.u64 = 0;
    ciu_soft_rst.s.soft_rst = 1;
    cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}


/**
 * Read a byte of fuse data
 * @param byte_addr   address to read
 *
 * @return byte of fuse data
 */
static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
{
    cvmx_mio_fus_rcmd_t read_cmd;

    read_cmd.u64 = 0;
    read_cmd.s.addr = byte_addr;
    read_cmd.s.pend = 1;
    cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
    while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
        ;
    return(read_cmd.s.dat);
}


/**
 * Read a single fuse bit
 *
 * @param fuse   Fuse number (0-1024)
 *
 * @return fuse value: 0 or 1
 */
static inline int cvmx_fuse_read(int fuse)
{
    return((cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1);
}
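
/* Illustrative usage (a sketch, not part of the original header). The fuse
   number and handler below are hypothetical; real fuse assignments are chip
   specific.

       if (cvmx_fuse_read(42))
           handle_blown_fuse();   // hypothetical handler for a blown fuse
*/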

#ifdef  __cplusplus
}
#endif

#endif /* __CVMX_ACCESS_NATIVE_H__ */
