/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/
/**
 * cvmx-tlb supplies per-core TLB access functions for the simple executive.
 *
 * <hr>$Revision: 41586 $<hr>
 */
/**
 * Convert a page mask to a human-readable page size string.
 */
static inline const char* __mask_to_str(uint64_t mask)
{
	/* Most OCTEON processors do not support 1K page sizes; normalize the
	   mask by adding CVMX_TLB_PAGEMASK_4K so it can be compared against
	   the CVMX_TLB_PAGEMASK_* constants below. */
	uint64_t non_1k_mask = mask + CVMX_TLB_PAGEMASK_4K;

	switch (non_1k_mask) {
	case CVMX_TLB_PAGEMASK_4K:   return "4kb";
	case CVMX_TLB_PAGEMASK_16K:  return "16kb";
	case CVMX_TLB_PAGEMASK_64K:  return "64kb";
	case CVMX_TLB_PAGEMASK_256K: return "256kb";
	case CVMX_TLB_PAGEMASK_1M:   return "1Mb";
	case CVMX_TLB_PAGEMASK_4M:   return "4Mb";
	case CVMX_TLB_PAGEMASK_16M:  return "16Mb";
	case CVMX_TLB_PAGEMASK_64M:  return "64Mb";
	case CVMX_TLB_PAGEMASK_256M: return "256Mb";
	}

	return "";
}
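
/*
 * Illustrative sketch (not part of the original sources): __mask_to_str()
 * expects the raw pagemask value read back from CP0, as the dump routines
 * below do, e.g.
 *
 *   uint64_t pgmask;
 *   CVMX_MF_PAGEMASK(pgmask);
 *   cvmx_dprintf("pgmask=%s\n", __mask_to_str(pgmask));
 */
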
/**
 * Issue the TLB read instruction.
 */
static inline void __tlb_read(void){

/**
 * Issue the TLB write instruction.
 */
static inline void __tlb_write(void){
/**
 * Issue the TLB probe instruction.
 */
static inline int __tlb_probe(uint64_t hi){
	CVMX_MT_ENTRY_HIGH(hi);

	CVMX_MF_TLB_INDEX(index);

	if (index < 0) index = -1;
/**
 * Read a single TLB entry.
 *
 * @return  0: TLB entry was read
 *         -1: index is invalid
 */
static inline int __tlb_read_index(uint32_t tlbi){
	if (tlbi >= cvmx_tlb_size_limit()) {
		return -1;
	}

	CVMX_MT_TLB_INDEX(tlbi);
/**
 * Write a single TLB entry.
 *
 * @return  0: TLB entry was written
 *         -1: index is invalid
 */
static inline int __tlb_write_index(uint32_t tlbi,
				    uint64_t hi, uint64_t lo0,
				    uint64_t lo1, uint64_t pagemask)
{
	if (tlbi >= cvmx_tlb_size_limit()) {
		return -1;
	}

	cvmx_dprintf("cvmx-tlb-dbg: "
		     "write TLB %d: hi %lx, lo0 %lx, lo1 %lx, pagemask %lx\n",
		     tlbi, hi, lo0, lo1, pagemask);

	CVMX_MT_TLB_INDEX(tlbi);
	CVMX_MT_ENTRY_HIGH(hi);
	CVMX_MT_ENTRY_LO_0(lo0);
	CVMX_MT_ENTRY_LO_1(lo1);
	CVMX_MT_PAGEMASK(pagemask);
/**
 * Determine whether a TLB entry is free to use.
 */
static inline int __tlb_entry_is_free(uint32_t tlbi) {
	uint64_t lo0 = 0, lo1 = 0;

	if (tlbi < cvmx_tlb_size_limit()) {
		__tlb_read_index(tlbi);

		/* Unused entries have neither the even nor the odd page mapped */
		CVMX_MF_ENTRY_LO_0(lo0);
		CVMX_MF_ENTRY_LO_1(lo1);

		if (!(lo0 & TLB_VALID) && !(lo1 & TLB_VALID)) {
/**
 * Dump a single TLB entry.
 */
static inline void __tlb_dump_index(uint32_t tlbi)
{
	if (tlbi < cvmx_tlb_size_limit()) {

		if (__tlb_entry_is_free(tlbi)) {
			cvmx_dprintf("Index: %3d Free \n", tlbi);
		} else {
			uint64_t lo0, lo1, pgmask;
			uint64_t hi;

			__tlb_read_index(tlbi);

			CVMX_MF_ENTRY_HIGH(hi);
			CVMX_MF_ENTRY_LO_0(lo0);
			CVMX_MF_ENTRY_LO_1(lo1);
			CVMX_MF_PAGEMASK(pgmask);

			cvmx_dprintf("Index: %3d pgmask=%s ", tlbi, __mask_to_str(pgmask));

			c0 = (lo0 >> 3) & 7;
			c1 = (lo1 >> 3) & 7;

			cvmx_dprintf("va=%0*lx asid=%02x\n",
				     width, (hi & ~0x1fffUL), hi & 0xff);

			cvmx_dprintf("\t[pa=%0*lx c=%d d=%d v=%d g=%d] ",
				     (lo0 << 6) & PAGE_MASK, c0,

			cvmx_dprintf("[pa=%0*lx c=%d d=%d v=%d g=%d]\n",
				     (lo1 << 6) & PAGE_MASK, c1,
/** Get the TLB wired index, i.e. the index of the first non-wired entry. */
static inline uint32_t __tlb_wired_index(void) {
	uint32_t tlbi;
	CVMX_MF_TLB_WIRED(tlbi);
	return tlbi;
}
/**
 * Set up a wired entry. This function is designed to be used by the Simple
 * Executive to set up its virtual-to-physical address mapping at start-up
 * time. After the mapping is set up, the remaining unused TLB entries can
 * be used for run-time shared memory mapping.
 *
 * Calling this function causes the CP0 wired index register to increase.
 * The wired index register points to the separation between the fixed TLB
 * mapping and the run-time shared memory mapping.
 *
 * @param hi        Entry High
 * @param lo0       Entry Low0
 * @param lo1       Entry Low1
 * @param pagemask  Pagemask
 *
 * @return  0: the entry was added
 * @return -1: out of TLB entries
 */
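
/*
 * Illustrative sketch (not part of the original sources): wiring a mapping
 * by hand, using the same field encoding that cvmx_tlb_write_entry() uses
 * further below.  "vaddr", "paddr" and "flags" are caller-supplied
 * placeholders; "flags" stands for the EntryLo flag bits (cache attribute,
 * dirty/valid/global).  Most callers would use cvmx_tlb_add_fixed_entry()
 * instead of building the fields directly.
 *
 *   uint64_t size = 2 * 1024 * 1024;                        // two 1 MB pages
 *   uint64_t hi   = (vaddr >> 12) << 12;                    // VPN2, ASID 0
 *   uint64_t lo0  = ((paddr >> 12) << 6) | flags;               // even page
 *   uint64_t lo1  = (((paddr + size / 2) >> 12) << 6) | flags;  // odd page
 *   uint64_t pagemask = ((size / 2) - 1) & (~1 << 11);
 *
 *   if (cvmx_tlb_add_wired_entry(hi, lo0, lo1, pagemask) < 0)
 *       cvmx_dprintf("out of TLB entries\n");
 */
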
int cvmx_tlb_add_wired_entry( uint64_t hi, uint64_t lo0,
			      uint64_t lo1, uint64_t pagemask)
{
	uint32_t index;

	index = __tlb_wired_index();

	/* Check that the index is free to use */
	if (index < cvmx_tlb_size_limit() && __tlb_entry_is_free(index) ) {
		__tlb_write_index(index, hi, lo0, lo1, pagemask);
		/* Bump the wired index by one */
		CVMX_MT_TLB_WIRED(index + 1);
/**
 * Find a free entry that can be used for shared memory mapping.
 *
 * @return -1: no free entry found
 * @return  : index of a free entry
 */
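
/*
 * Illustrative sketch (not part of the original sources): grabbing a free
 * run-time entry before programming it with cvmx_tlb_write_runtime_entry()
 * further below.
 *
 *   int idx = cvmx_tlb_allocate_runtime_entry();
 *   if (idx < 0)
 *       cvmx_dprintf("no free TLB entries for shared memory\n");
 */
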
int cvmx_tlb_allocate_runtime_entry(void)
{
	uint32_t i, ret = -1;

	for (i = __tlb_wired_index(); i < cvmx_tlb_size_limit(); i++) {
		/* Check that the index is free to use */
		if (__tlb_entry_is_free(i)) {
			/* Found a free entry */
/**
 * Invalidate a TLB entry, removing the previous mapping if one was set up.
 */
void cvmx_tlb_free_runtime_entry(uint32_t tlbi)
{
	/* Only invalidate an unwired (run-time) TLB entry */
	if ((tlbi < cvmx_tlb_size_limit()) && (tlbi >= __tlb_wired_index())) {
		__tlb_write_index(tlbi, 0xffffffff80000000ULL, 0, 0, 0);
	}
}
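
/*
 * Illustrative sketch (not part of the original sources): releasing a
 * mapping created with cvmx_tlb_allocate_runtime_entry() and
 * cvmx_tlb_write_runtime_entry(); "idx" is the index previously returned by
 * the allocator, and the entry becomes available to the allocator again.
 *
 *   cvmx_tlb_free_runtime_entry(idx);
 */
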
/**
 * Program a single TLB entry to enable the provided vaddr-to-paddr mapping.
 *
 * @param index      Index of the TLB entry
 * @param vaddr      The virtual address for this mapping
 * @param paddr      The physical address for this mapping
 * @param size       Size of the mapping
 * @param tlb_flags  Entry mapping flags
 */
void cvmx_tlb_write_entry(int index, uint64_t vaddr, uint64_t paddr,
			  uint64_t size, uint64_t tlb_flags) {
	uint64_t lo0, lo1, hi, pagemask;

	if ( __is_power_of_two(size) ) {
		if ( (__log2(size) & 1 ) == 0) {
			/* log2(size) is even, so size equals a single page
			   size; map only one page of the even/odd pair and
			   figure out whether it is the even or the odd one */
			if ((vaddr >> __log2(size)) & 1) {
				lo0 = 0;
				lo1 = ((paddr >> 12) << 6) | tlb_flags;
				hi = ((vaddr - size) >> 12) << 12;
			} else {
				lo0 = ((paddr >> 12) << 6) | tlb_flags;
				lo1 = 0;
				hi = ((vaddr) >> 12) << 12;
			}
			pagemask = (size - 1) & (~1 << 11);
		} else {
			/* log2(size) is odd: use both pages of the pair,
			   each covering size/2 */
			lo0 = ((paddr >> 12) << 6) | tlb_flags;
			lo1 = (((paddr + size / 2) >> 12) << 6) | tlb_flags;
			hi = ((vaddr) >> 12) << 12;
			pagemask = ((size / 2) - 1) & (~1 << 11);
		}

		__tlb_write_index(index, hi, lo0, lo1, pagemask);
	}
}
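
/*
 * Worked example (illustrative, not part of the original sources): mapping
 * size = 8 MB at an 8 MB-aligned vaddr/paddr.  log2(8 MB) = 23 is odd, so
 * both pages of the even/odd pair are used: lo0 covers the first 4 MB and
 * lo1 the second 4 MB, with the pagemask set for 4 MB pages.  For
 * size = 4 MB (log2 even) only one 4 MB page of the pair is programmed and
 * bit 22 of vaddr selects whether it is the even or the odd page.
 * ("idx", "vaddr", "paddr" and "flags" are caller-supplied placeholders.)
 *
 *   cvmx_tlb_write_entry(idx, vaddr, paddr, 8ull << 20, flags);
 */
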
/**
 * Program a single TLB entry to enable the provided vaddr-to-paddr mapping.
 * This version adds a wired entry that should not be changed at run time.
 *
 * @param vaddr      The virtual address for this mapping
 * @param paddr      The physical address for this mapping
 * @param size       Size of the mapping
 * @param tlb_flags  Entry mapping flags
 *
 * @return -1: TLB out of entries
 *          0: fixed entry added
 */
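
/*
 * Illustrative sketch (not part of the original sources): a boot-time,
 * permanently wired mapping.  "vaddr", "paddr" and "flags" are caller-
 * supplied placeholders; the 32 KB size is mapped as two 16 KB pages in a
 * single wired entry.
 *
 *   if (cvmx_tlb_add_fixed_entry(vaddr, paddr, 32 * 1024, flags) < 0)
 *       cvmx_dprintf("no free wired TLB entries\n");
 */
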
int cvmx_tlb_add_fixed_entry( uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t tlb_flags) {
	uint32_t index;

	CVMX_MF_TLB_WIRED(index);

	/* Check that the index is free to use */
	if (index < cvmx_tlb_size_limit() && __tlb_entry_is_free(index) ) {
		cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);

		if (!__tlb_entry_is_free(index)) {
			/* The write succeeded; bump up the wired register */
			CVMX_MT_TLB_WIRED(index + 1);
/**
 * Program a single TLB entry to enable the provided vaddr-to-paddr mapping.
 * This version writes a run-time entry. It checks the index to make sure it
 * does not overwrite any fixed (wired) entries.
 *
 * @param index      Index of the TLB entry
 * @param vaddr      The virtual address for this mapping
 * @param paddr      The physical address for this mapping
 * @param size       Size of the mapping
 * @param tlb_flags  Entry mapping flags
 */
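
/*
 * Illustrative sketch (not part of the original sources): the index should
 * come from cvmx_tlb_allocate_runtime_entry(); indexes inside the wired
 * region are not written.  "vaddr", "paddr", "size" and "flags" are
 * caller-supplied placeholders.
 *
 *   int idx = cvmx_tlb_allocate_runtime_entry();
 *   if (idx >= 0)
 *       cvmx_tlb_write_runtime_entry(idx, vaddr, paddr, size, flags);
 */
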
void cvmx_tlb_write_runtime_entry(int index, uint64_t vaddr, uint64_t paddr,
				  uint64_t size, uint64_t tlb_flags)
{
	int wired_index;

	CVMX_MF_TLB_WIRED(wired_index);

	if (index >= wired_index) {
		cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);
/**
 * Find the TLB index of a given virtual address.
 *
 * @param vaddr  The virtual address to look up
 * @return -1: not TLB mapped; otherwise the TLB index of the mapping
 */
int cvmx_tlb_lookup(uint64_t vaddr) {
	uint64_t hi = (vaddr >> 12) << 12; /* We always use ASID 0 */

	return __tlb_probe(hi);
}
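
/*
 * Illustrative sketch (not part of the original sources): checking whether
 * an address is currently covered by a TLB entry before creating a new
 * mapping ("vaddr" is a caller-supplied placeholder).
 *
 *   if (cvmx_tlb_lookup(vaddr) == -1)
 *       cvmx_dprintf("0x%llx is not TLB mapped\n", (unsigned long long)vaddr);
 */
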
/** Debug routine to show all shared memory mappings. */
void cvmx_tlb_dump_shared_mapping(void) {
	uint32_t tlbi;
	for (tlbi = __tlb_wired_index(); tlbi < cvmx_tlb_size_limit(); tlbi++) {
		__tlb_dump_index(tlbi);
	}
}
/** Debug routine to show all TLB entries of this core. */
void cvmx_tlb_dump_all(void) {
	uint32_t tlbi;
	for (tlbi = 0; tlbi < cvmx_tlb_size_limit(); tlbi++) {
		__tlb_dump_index(tlbi);
	}
}