1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
49 * Implementation of the Level 2 Cache (L2C) control,
50 * measurement, and debugging facilities.
52 * <hr>$Revision: 52004 $<hr>
55 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
56 #include <asm/octeon/cvmx.h>
57 #include <asm/octeon/cvmx-l2c.h>
58 #include <asm/octeon/cvmx-spinlock.h>
60 #if !defined(__FreeBSD__) || !defined(_KERNEL)
61 #include "cvmx-config.h"
65 #include "cvmx-spinlock.h"
66 #include "cvmx-interrupt.h"
69 #ifndef CVMX_BUILD_FOR_LINUX_HOST
70 /* This spinlock is used internally to ensure that only one core is performing
71 ** certain L2 operations at a time.
73 ** NOTE: This only protects calls from within a single application - if multiple applications
74 ** or operating systems are running, then it is up to the user program to coordinate between them.
/* Serializes debug-mode L2 operations (line lock/flush) between cores of one app. */
76 CVMX_SHARED cvmx_spinlock_t cvmx_l2c_spinlock;
/* Serializes updates to the L2C virtualization (L2C_VRT_*) registers. */
79 CVMX_SHARED cvmx_spinlock_t cvmx_l2c_vrt_spinlock;
81 int cvmx_l2c_get_core_way_partition(uint32_t core)
/* Read the UMSK way-partition mask for @core. CN63XX keeps one mask per core
** in L2C_WPAR_PPX; older chips pack four 8-bit UMSK fields into each L2C_SPAR
** register, selected below by the low two bits of the core number. */
85 /* Validate the core number */
86 if (core >= cvmx_octeon_num_cores())
89 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
90 return (cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff);
92 /* Use the lower two bits of the coreNumber to determine the bit offset
93 * of the UMSK[] field in the L2C_SPAR register.
95 field = (core & 0x3) * 8;
97 /* Return the UMSK[] field from the appropriate L2C_SPAR register based
/* One L2C_SPAR register covers four cores; extract the 8-bit field. */
104 return((cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field);
106 return((cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field);
108 return((cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field);
110 return((cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field);
115 int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
/* Write the UMSK way-partition mask for @core. A mask that blocks ALL ways is
** rejected on pre-CN63XX parts; CN63XX writes go straight to L2C_WPAR_PPX,
** older parts read-modify-write the shared L2C_SPAR register for that core. */
120 valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
124 /* A UMSK setting which blocks all L2C Ways is an error on some chips */
125 if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
128 /* Validate the core number */
129 if (core >= cvmx_octeon_num_cores())
132 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
134 cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
138 /* Use the lower two bits of core to determine the bit offset of the
139 * UMSK[] field in the L2C_SPAR register.
141 field = (core & 0x3) * 8;
143 /* Assign the new mask setting to the UMSK[] field in the appropriate
144 * L2C_SPAR register based on the core_num.
150 cvmx_write_csr(CVMX_L2C_SPAR0,
151 (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
155 cvmx_write_csr(CVMX_L2C_SPAR1,
156 (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
160 cvmx_write_csr(CVMX_L2C_SPAR2,
161 (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
165 cvmx_write_csr(CVMX_L2C_SPAR3,
166 (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
174 int cvmx_l2c_set_hw_way_partition(uint32_t mask)
/* Set the way-partition mask used for hardware (IOB) accesses. CN63XX uses
** L2C_WPAR_IOBX(0); older chips use the low byte of L2C_SPAR4. */
178 valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
181 /* A UMSK setting which blocks all L2C Ways is an error on some chips */
182 if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
185 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
186 cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
188 cvmx_write_csr(CVMX_L2C_SPAR4, (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
192 int cvmx_l2c_get_hw_way_partition(void)
/* Return the hardware (IOB) way-partition mask; register used is model-specific. */
194 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
195 return(cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff);
197 return(cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF));
200 void cvmx_l2c_config_perf(uint32_t counter, cvmx_l2c_event_t event,
201 uint32_t clear_on_read)
/* Program L2C performance counter @counter to count @event.
** CN3XXX/CN5XXX: select via L2C_PFCTL cntNsel/cntNrdclr fields.
** Newer chips: program L2C_TADX_PRF identically for every TAD; the event
** enum differs on these chips (see the cvmx_warn below) and clear-on-read
** is not supported. */
204 if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
206 cvmx_l2c_pfctl_t pfctl;
208 pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
213 pfctl.s.cnt0sel = event;
215 pfctl.s.cnt0rdclr = clear_on_read;
218 pfctl.s.cnt1sel = event;
220 pfctl.s.cnt1rdclr = clear_on_read;
223 pfctl.s.cnt2sel = event;
225 pfctl.s.cnt2rdclr = clear_on_read;
229 pfctl.s.cnt3sel = event;
231 pfctl.s.cnt3rdclr = clear_on_read;
235 cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
239 cvmx_l2c_tadx_prf_t l2c_tadx_prf;
242 cvmx_warn("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
244 cvmx_warn_if(clear_on_read, "L2C counters don't support clear on read for this chip\n");
246 l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
251 l2c_tadx_prf.s.cnt0sel = event;
254 l2c_tadx_prf.s.cnt1sel = event;
257 l2c_tadx_prf.s.cnt2sel = event;
261 l2c_tadx_prf.s.cnt3sel = event;
/* Mirror the same selection into every TAD so counts can be summed later. */
264 for (tad=0; tad<CVMX_L2C_TADS; tad++)
265 cvmx_write_csr(CVMX_L2C_TADX_PRF(tad), l2c_tadx_prf.u64);
269 uint64_t cvmx_l2c_read_perf(uint32_t counter)
/* Read performance counter @counter. On CN3XXX/CN5XXX this is a single
** L2C_PFCn register; on multi-TAD chips the per-TAD counters are summed.
** NOTE(review): the inner `uint64_t counter = 0;` accumulators shadow the
** `counter` parameter inside each branch — legal here since the parameter
** has already selected the branch, but worth renaming for clarity. */
274 if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
275 return(cvmx_read_csr(CVMX_L2C_PFC0));
278 uint64_t counter = 0;
280 for (tad=0; tad<CVMX_L2C_TADS; tad++)
281 counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
285 if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
286 return(cvmx_read_csr(CVMX_L2C_PFC1));
289 uint64_t counter = 0;
291 for (tad=0; tad<CVMX_L2C_TADS; tad++)
292 counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
296 if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
297 return(cvmx_read_csr(CVMX_L2C_PFC2));
300 uint64_t counter = 0;
302 for (tad=0; tad<CVMX_L2C_TADS; tad++)
303 counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
308 if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
309 return(cvmx_read_csr(CVMX_L2C_PFC3));
312 uint64_t counter = 0;
314 for (tad=0; tad<CVMX_L2C_TADS; tad++)
315 counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
321 #ifndef CVMX_BUILD_FOR_LINUX_HOST
324 * Helper function use to fault in cache lines for L2 cache locking
326 * @param addr Address of base of memory region to read into L2 cache
327 * @param len Length (in bytes) of region to fault in
329 static void fault_in(uint64_t addr, int len)
/* Touches one byte per cache line over [addr, addr+len) so that the loads
** miss L1 (invalidated below) and allocate the lines into L2. */
333 /* Adjust addr and length so we get all cache lines even for
334 ** small ranges spanning two cache lines */
335 len += addr & CVMX_CACHE_LINE_MASK;
336 addr &= ~CVMX_CACHE_LINE_MASK;
337 ptr = (volatile char *)cvmx_phys_to_ptr(addr);
338 CVMX_DCACHE_INVALIDATE; /* Invalidate L1 cache to make sure all loads result in data being in L2 */
342 len -= CVMX_CACHE_LINE_SIZE;
343 ptr += CVMX_CACHE_LINE_SIZE;
347 int cvmx_l2c_lock_line(uint64_t addr)
/* Lock the L2 cache line containing physical address @addr.
** CN63XX: use the CACHE lock instruction, then probe every way's tag via
** index-load-tag to verify the line landed and its lock bit is set.
** Older chips: enter L2C debug mode (this core becomes the debug core,
** guarded by cvmx_l2c_spinlock), program LCKBASE/LCKOFF, fault the line in,
** then check L2T_ERR for lock failures. */
349 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
351 int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
352 uint64_t assoc = cvmx_l2c_get_num_assoc();
353 uint64_t tag = addr >> shift;
354 uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
356 cvmx_l2c_tadx_tag_t l2c_tadx_tag;
358 CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
360 /* Make sure we were able to lock the line */
361 for (way = 0; way < assoc; way++)
363 CVMX_CACHE_LTGL2I(index | (way << shift), 0);
364 CVMX_SYNC; // make sure CVMX_L2C_TADX_TAG is updated
365 l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
366 if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
370 /* Check if a valid line is found */
373 //cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr);
377 /* Check if lock bit is not set */
378 if (!l2c_tadx_tag.s.lock)
380 //cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr);
389 cvmx_l2c_dbg_t l2cdbg;
390 cvmx_l2c_lckbase_t lckbase;
391 cvmx_l2c_lckoff_t lckoff;
392 cvmx_l2t_err_t l2t_err;
394 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
400 /* Clear l2t error bits if set */
401 l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
402 l2t_err.s.lckerr = 1;
403 l2t_err.s.lckerr2 = 1;
404 cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
406 addr &= ~CVMX_CACHE_LINE_MASK;
408 /* Set this core as debug core */
409 l2cdbg.s.ppnum = cvmx_get_core_num();
411 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
412 cvmx_read_csr(CVMX_L2C_DBG);
414 lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
415 cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
416 cvmx_read_csr(CVMX_L2C_LCKOFF);
/* If index aliasing is on, pre-alias the address so LCKBASE targets the
** same set the hardware will actually use. */
418 if (((cvmx_l2c_cfg_t)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias)
420 int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
421 uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
422 lckbase.s.lck_base = addr_tmp >> 7;
426 lckbase.s.lck_base = addr >> 7;
429 lckbase.s.lck_ena = 1;
430 cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
431 cvmx_read_csr(CVMX_L2C_LCKBASE); // Make sure it gets there
433 fault_in(addr, CVMX_CACHE_LINE_SIZE);
435 lckbase.s.lck_ena = 0;
436 cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
437 cvmx_read_csr(CVMX_L2C_LCKBASE); // Make sure it gets there
439 /* Stop being debug core */
440 cvmx_write_csr(CVMX_L2C_DBG, 0);
441 cvmx_read_csr(CVMX_L2C_DBG);
443 l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
444 if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
445 retval = 1; /* We were unable to lock the line */
447 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
453 int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
/* Lock every cache line covering [start, start+len); accumulates the
** per-line return codes from cvmx_l2c_lock_line(). */
457 /* Round start/end to cache line boundaries */
458 len += start & CVMX_CACHE_LINE_MASK;
459 start &= ~CVMX_CACHE_LINE_MASK;
460 len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
464 retval += cvmx_l2c_lock_line(start);
465 start += CVMX_CACHE_LINE_SIZE;
466 len -= CVMX_CACHE_LINE_SIZE;
473 void cvmx_l2c_flush(void)
/* Writeback-invalidate the entire L2 cache. CN6XXX builds an index/way
** address and uses the CACHE WBIL2I (index) op per line; older models fall
** back to cvmx_l2c_flush_line() per set/way. */
476 uint64_t n_assoc, n_set;
478 n_set = cvmx_l2c_get_num_sets();
479 n_assoc = cvmx_l2c_get_num_assoc();
481 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
484 /* These may look like constants, but they aren't... */
485 int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
486 int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
487 for (set=0; set < n_set; set++)
489 for(assoc=0; assoc < n_assoc; assoc++)
491 address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
492 (assoc << assoc_shift) |
494 CVMX_CACHE_WBIL2I(address, 0);
500 for (set=0; set < n_set; set++)
501 for(assoc=0; assoc < n_assoc; assoc++)
502 cvmx_l2c_flush_line(assoc, set);
507 int cvmx_l2c_unlock_line(uint64_t address)
/* Unlock (by flushing) the L2 line holding @address, if present. Scans all
** ways at the line's index for a valid tag match; returns whether a line
** was found. On CN63XX the flush uses the physical address directly. */
510 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
512 int assoc; cvmx_l2c_tag_t tag;
514 uint32_t index = cvmx_l2c_address_to_index(address);
516 tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
518 /* For 63XX, we can flush a line by using the physical address directly,
519 ** so finding the cache line used by the address is only required to provide
520 ** the proper return value for the function.
522 for(assoc = 0; assoc < CVMX_L2_ASSOC; assoc++)
524 tag = cvmx_l2c_get_tag(assoc, index);
526 if (tag.s.V && (tag.s.addr == tag_addr))
528 CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
/* Pre-CN63XX path: locate the way by tag compare, flush by set/way. */
539 uint32_t index = cvmx_l2c_address_to_index(address);
541 /* Compute portion of address that is stored in tag */
542 tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
543 for(assoc = 0; assoc < CVMX_L2_ASSOC; assoc++)
545 tag = cvmx_l2c_get_tag(assoc, index);
547 if (tag.s.V && (tag.s.addr == tag_addr))
549 cvmx_l2c_flush_line(assoc, index);
557 int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
/* Unlock every cache line covering [start, start+len); returns the number
** of lines that were actually found and unlocked. */
559 int num_unlocked = 0;
560 /* Round start/end to cache line boundaries */
561 len += start & CVMX_CACHE_LINE_MASK;
562 start &= ~CVMX_CACHE_LINE_MASK;
563 len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
566 num_unlocked += cvmx_l2c_unlock_line(start);
567 start += CVMX_CACHE_LINE_SIZE;
568 len -= CVMX_CACHE_LINE_SIZE;
575 /* Internal l2c tag types. These are converted to a generic structure
576 ** that can be used on all chips */
/* Per-model raw tag layouts. All share V/D/L/U flag bits and differ only in
** the width of the reserved/addr fields (the tag address width shrinks as
** the number of sets grows). Big-endian bitfield order shown here; the
** little-endian variant is presumably elsewhere in the file. */
580 #if __BYTE_ORDER == __BIG_ENDIAN
581 struct cvmx_l2c_tag_cn50xx
583 uint64_t reserved : 40;
584 uint64_t V : 1; // Line valid
585 uint64_t D : 1; // Line dirty
586 uint64_t L : 1; // Line locked
587 uint64_t U : 1; // Use, LRU eviction
588 uint64_t addr : 20; // Phys mem addr (33..14)
590 struct cvmx_l2c_tag_cn30xx
592 uint64_t reserved : 41;
593 uint64_t V : 1; // Line valid
594 uint64_t D : 1; // Line dirty
595 uint64_t L : 1; // Line locked
596 uint64_t U : 1; // Use, LRU eviction
597 uint64_t addr : 19; // Phys mem addr (33..15)
599 struct cvmx_l2c_tag_cn31xx
601 uint64_t reserved : 42;
602 uint64_t V : 1; // Line valid
603 uint64_t D : 1; // Line dirty
604 uint64_t L : 1; // Line locked
605 uint64_t U : 1; // Use, LRU eviction
606 uint64_t addr : 18; // Phys mem addr (33..16)
608 struct cvmx_l2c_tag_cn38xx
610 uint64_t reserved : 43;
611 uint64_t V : 1; // Line valid
612 uint64_t D : 1; // Line dirty
613 uint64_t L : 1; // Line locked
614 uint64_t U : 1; // Use, LRU eviction
615 uint64_t addr : 17; // Phys mem addr (33..17)
617 struct cvmx_l2c_tag_cn58xx
619 uint64_t reserved : 44;
620 uint64_t V : 1; // Line valid
621 uint64_t D : 1; // Line dirty
622 uint64_t L : 1; // Line locked
623 uint64_t U : 1; // Use, LRU eviction
624 uint64_t addr : 16; // Phys mem addr (33..18)
/* CN56XX/CN52XX reuse layouts from chips with the same set count. */
626 struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
627 struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
634 * Function to read a L2C tag. This code make the current core
635 * the 'debug core' for the L2. This code must only be executed by
638 * @param assoc Association (way) of the tag to dump
639 * @param index Index of the cacheline
641 * @return The Octeon model specific tag structure. This is translated by a wrapper
642 * function to a generic form that is easier for applications to use.
644 static __cvmx_l2c_tag_t __read_l2_tag(uint64_t assoc, uint64_t index)
/* Reads the raw tag by entering L2C debug mode and issuing a single load
** from a specially formed XKPHYS address; interrupts are disabled for the
** duration because ALL loads return debug data while in debug mode. */
647 uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
648 uint64_t core = cvmx_get_core_num();
649 __cvmx_l2c_tag_t tag_val;
650 uint64_t dbg_addr = CVMX_L2C_DBG;
653 cvmx_l2c_dbg_t debug_val;
655 /* For low core count parts, the core number is always small enough
656 ** to stay in the correct field and not set any reserved bits */
657 debug_val.s.ppnum = core;
659 debug_val.s.set = assoc;
661 CVMX_SYNC; /* Make sure core is quiet (no prefetches, etc.) before entering debug mode */
662 CVMX_DCACHE_INVALIDATE; /* Flush L1 to make sure debug load misses L1 */
664 cvmx_local_irq_save(flags);
666 /* The following must be done in assembly as when in debug mode all data loads from
667 ** L2 return special debug data, not normal memory contents. Also, interrupts must be disabled,
668 ** since if an interrupt occurs while in debug mode the ISR will get debug data from all its memory
669 ** reads instead of the contents of memory */
675 " sd %[dbg_val], 0(%[dbg_addr]) \n" /* Enter debug mode, wait for store */
676 " ld $0, 0(%[dbg_addr]) \n"
677 " ld %[tag_val], 0(%[tag_addr]) \n" /* Read L2C tag data */
678 " sd $0, 0(%[dbg_addr]) \n" /* Exit debug mode, wait for store */
679 " ld $0, 0(%[dbg_addr]) \n"
680 " cache 9, 0($0) \n" /* Invalidate dcache to discard debug data */
682 :[tag_val] "=r" (tag_val): [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr) : "memory");
684 cvmx_local_irq_restore(flags);
691 cvmx_l2c_tag_t cvmx_l2c_get_tag(uint32_t association, uint32_t index)
/* Return the generic tag (V/D/L/U/addr) for a given way and set.
** CN63XX: index-load-tag CACHE op latches the tag into L2C_TADX_TAG.
** Older chips: read the model-specific raw tag via __read_l2_tag() and
** translate it field-by-field into the generic structure. */
696 if ((int)association >= cvmx_l2c_get_num_assoc())
698 cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
701 if ((int)index >= cvmx_l2c_get_num_sets())
703 cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n", (int)index, cvmx_l2c_get_num_sets());
706 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
708 cvmx_l2c_tadx_tag_t l2c_tadx_tag;
709 uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
710 (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
711 (index << CVMX_L2C_IDX_ADDR_SHIFT));
712 /* Use L2 cache Index load tag cache instruction, as hardware loads
713 the virtual tag for the L2 cache block with the contents of
714 L2C_TAD0_TAG register. */
715 CVMX_CACHE_LTGL2I(address, 0);
716 CVMX_SYNC; // make sure CVMX_L2C_TADX_TAG is updated
717 l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
719 tag.s.V = l2c_tadx_tag.s.valid;
720 tag.s.D = l2c_tadx_tag.s.dirty;
721 tag.s.L = l2c_tadx_tag.s.lock;
722 tag.s.U = l2c_tadx_tag.s.use;
723 tag.s.addr = l2c_tadx_tag.s.tag;
727 __cvmx_l2c_tag_t tmp_tag;
728 /* __read_l2_tag is intended for internal use only */
729 tmp_tag = __read_l2_tag(association, index);
731 /* Convert all tag structure types to generic version, as it can represent all models */
732 if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))
734 tag.s.V = tmp_tag.cn58xx.V;
735 tag.s.D = tmp_tag.cn58xx.D;
736 tag.s.L = tmp_tag.cn58xx.L;
737 tag.s.U = tmp_tag.cn58xx.U;
738 tag.s.addr = tmp_tag.cn58xx.addr;
740 else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
742 tag.s.V = tmp_tag.cn38xx.V;
743 tag.s.D = tmp_tag.cn38xx.D;
744 tag.s.L = tmp_tag.cn38xx.L;
745 tag.s.U = tmp_tag.cn38xx.U;
746 tag.s.addr = tmp_tag.cn38xx.addr;
748 else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
750 tag.s.V = tmp_tag.cn31xx.V;
751 tag.s.D = tmp_tag.cn31xx.D;
752 tag.s.L = tmp_tag.cn31xx.L;
753 tag.s.U = tmp_tag.cn31xx.U;
754 tag.s.addr = tmp_tag.cn31xx.addr;
756 else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
758 tag.s.V = tmp_tag.cn30xx.V;
759 tag.s.D = tmp_tag.cn30xx.D;
760 tag.s.L = tmp_tag.cn30xx.L;
761 tag.s.U = tmp_tag.cn30xx.U;
762 tag.s.addr = tmp_tag.cn30xx.addr;
764 else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
766 tag.s.V = tmp_tag.cn50xx.V;
767 tag.s.D = tmp_tag.cn50xx.D;
768 tag.s.L = tmp_tag.cn50xx.L;
769 tag.s.U = tmp_tag.cn50xx.U;
770 tag.s.addr = tmp_tag.cn50xx.addr;
774 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
783 uint32_t cvmx_l2c_address_to_index (uint64_t addr)
/* Map a physical address to its L2 set index, applying the chip's index
** aliasing (hashing) when enabled. Alias-enable is read from L2C_CTL on
** CN6XXX (inverted DISIDXALIAS bit) and from L2C_CFG on older chips. */
785 uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
788 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
790 cvmx_l2c_ctl_t l2c_ctl;
791 l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
792 indxalias = !l2c_ctl.s.disidxalias;
796 cvmx_l2c_cfg_t l2c_cfg;
797 l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
798 indxalias = l2c_cfg.s.idxalias;
803 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
805 uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
806 idx ^= idx / cvmx_l2c_get_num_sets();
811 idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
814 idx &= CVMX_L2C_IDX_MASK;
818 int cvmx_l2c_get_cache_size_bytes(void)
/* Total L2 size = sets * ways * line size. */
820 return (cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() * CVMX_CACHE_LINE_SIZE);
824 * Return log base 2 of the number of sets in the L2 cache
827 int cvmx_l2c_get_set_bits(void)
/* Per-model table of log2(set count); unknown models warn and default to
** the largest (11 bits / 2048 sets). */
830 if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
831 OCTEON_IS_MODEL(OCTEON_CN58XX))
832 l2_set_bits = 11; /* 2048 sets */
833 else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
834 l2_set_bits = 10; /* 1024 sets */
835 else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
836 l2_set_bits = 9; /* 512 sets */
837 else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
838 l2_set_bits = 8; /* 256 sets */
839 else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
840 l2_set_bits = 7; /* 128 sets */
843 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
844 l2_set_bits = 11; /* 2048 sets */
850 /* Return the number of sets in the L2 Cache */
851 int cvmx_l2c_get_num_sets(void)
853 return (1 << cvmx_l2c_get_set_bits());
856 /* Return the number of associations in the L2 Cache */
857 int cvmx_l2c_get_num_assoc(void)
/* Base associativity is per model, then reduced if fuses indicate part of
** the cache is disabled: CN63XX reads MIO_FUS_DAT3[l2c_crip], older chips
** read crippling bits from L2D_FUS3 (bit positions 34/35). */
860 if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
861 OCTEON_IS_MODEL(OCTEON_CN52XX) ||
862 OCTEON_IS_MODEL(OCTEON_CN58XX) ||
863 OCTEON_IS_MODEL(OCTEON_CN50XX) ||
864 OCTEON_IS_MODEL(OCTEON_CN38XX))
866 else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
868 else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
869 OCTEON_IS_MODEL(OCTEON_CN30XX))
873 cvmx_dprintf("Unsupported OCTEON Model in %s\n", __FUNCTION__);
877 /* Check to see if part of the cache is disabled */
878 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
880 cvmx_mio_fus_dat3_t mio_fus_dat3;
882 mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
883 /* cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
884 <2> will be not used for 63xx
885 <1> disables 1/2 ways
886 <0> disables 1/4 ways
887 They are cumulative, so for 63xx:
890 0 1 12-way 1.5MB cache
892 1 1 4-way 512KB cache */
894 if (mio_fus_dat3.s.l2c_crip == 3)
896 else if (mio_fus_dat3.s.l2c_crip == 2)
898 else if (mio_fus_dat3.s.l2c_crip == 1)
904 val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
905 /* Using shifts here, as bit position names are different for
906 each model but they all mean the same. */
907 if ((val.u64 >> 35) & 0x1)
908 l2_assoc = l2_assoc >> 2;
909 else if ((val.u64 >> 34) & 0x1)
910 l2_assoc = l2_assoc >> 1;
917 #ifndef CVMX_BUILD_FOR_LINUX_HOST
919 * Flush a line from the L2 cache
920 * This should only be called from one core at a time, as this routine
921 * sets the core to the 'debug' core in order to flush the line.
923 * @param assoc Association (or way) to flush
924 * @param index Index to flush
926 void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
/* CN63XX: direct index writeback-invalidate via CACHE WBIL2I on a
** constructed way/index address. Older chips: enter debug mode targeting
** the given way (guarded by cvmx_l2c_spinlock) and force eviction with a
** prepare-for-store at the line's index. */
928 /* Check the range of the index. */
929 if (index > (uint32_t)cvmx_l2c_get_num_sets())
931 cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
935 /* Check the range of association. */
936 if (assoc > (uint32_t)cvmx_l2c_get_num_assoc())
938 cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
942 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
945 /* Create the address based on index and association.
946 Bits<20:17> select the way of the cache block involved in
948 Bits<16:7> of the effect address select the index */
949 address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
950 (assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
951 (index << CVMX_L2C_IDX_ADDR_SHIFT));
952 CVMX_CACHE_WBIL2I(address, 0);
956 cvmx_l2c_dbg_t l2cdbg;
959 if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
960 l2cdbg.s.ppnum = cvmx_get_core_num();
963 l2cdbg.s.set = assoc;
964 cvmx_spinlock_lock(&cvmx_l2c_spinlock);
965 /* Enter debug mode, and make sure all other writes complete before we
966 ** enter debug mode */
968 cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
969 cvmx_read_csr(CVMX_L2C_DBG);
971 CVMX_PREPARE_FOR_STORE (CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, index*CVMX_CACHE_LINE_SIZE), 0);
972 /* Exit debug mode */
974 cvmx_write_csr(CVMX_L2C_DBG, 0);
975 cvmx_read_csr(CVMX_L2C_DBG);
976 cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
981 #ifndef CVMX_BUILD_FOR_LINUX_HOST
983 /* L2C Virtualization APIs. These APIs are based on Octeon II documentation. */
987 * Helper function to decode VALUE to number of allowed virtualization IDS.
988 * Returns L2C_VRT_CTL[NUMID].
990 * @param nvid Number of virtual Ids.
991 * @return On success decode to NUMID, or to -1 on failure.
993 static inline int __cvmx_l2c_vrt_decode_numid(int nvid)
/* Valid nvid values are even counts up to CVMX_L2C_VRT_MAX_VIRTID_ALLOWED;
** the bit-counting below presumably rejects non-power-of-two values —
** TODO(review): confirm against the elided loop body. */
998 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
1000 if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
1002 cvmx_dprintf("WARNING: Invalid number of virtual ids(%d) requested, should be <= 64\n", nvid);
1008 if ((nvid & 1) == 0)
1015 if (bits == 1 || (zero_bits && ((bits - zero_bits) == 1)))
1022 * Set maxium number of Virtual IDs allowed in a machine.
1024 * @param nvid Number of virtial ids allowed in a machine.
1025 * @return Return 0 on success or -1 on failure.
1027 int cvmx_l2c_vrt_set_max_virtids(int nvid)
/* Programs L2C_VRT_CTL[NUMID]; refuses to change it once virtualization
** has been enabled (L2C_VRT_CTL[ENABLE] set). */
1029 if (OCTEON_IS_MODEL(OCTEON_CN63XX))
1031 cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1033 l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1035 if (l2c_vrt_ctl.s.enable)
1037 cvmx_dprintf("WARNING: Changing number of Virtual Machine IDs is not allowed after Virtualization is enabled\n");
1041 if (nvid < 1 || nvid > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
1043 cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_max_virtids: Invalid number of Virtual Machine IDs(%d) requested, max allowed %d\n", nvid, CVMX_L2C_VRT_MAX_VIRTID_ALLOWED);
1047 /* Calculate the numid based on nvid */
1048 l2c_vrt_ctl.s.numid = __cvmx_l2c_vrt_decode_numid(nvid);
1049 cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1055 * Get maxium number of virtual IDs allowed in a machine.
1057 * @return Return number of virtual machine IDs or -1 on failure.
1059 int cvmx_l2c_vrt_get_max_virtids(void)
/* Decodes L2C_VRT_CTL[NUMID] back to a count: 2^(numid+1). */
1063 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1065 cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1066 l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1067 virtids = 1 << (l2c_vrt_ctl.s.numid + 1);
1068 if (virtids > CVMX_L2C_VRT_MAX_VIRTID_ALLOWED)
1070 cvmx_dprintf("WARNING: cvmx_l2c_vrt_get_max_virtids: Invalid number of Virtual IDs initialized (%d)\n", virtids);
1079 * Helper function to decode VALUE to memory space coverage of L2C_VRT_MEM.
1080 * Returns L2C_VRT_CTL[MEMSZ].
1082 * @param memsz Memory in GB.
1083 * @return On success, decode to MEMSZ, or on failure return -1.
1085 static inline int __cvmx_l2c_vrt_decode_memsize(int memsz)
/* Same bit-counting validation pattern as __cvmx_l2c_vrt_decode_numid,
** applied to the memory-size encoding. */
1090 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1092 if (memsz == 0 || memsz > CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED)
1094 cvmx_dprintf("WARNING: Invalid virtual memory size(%d) requested, should be <= %d\n", memsz, CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED);
1100 if ((memsz & 1) == 0)
1107 if (bits == 1 || (bits - zero_bits) == 1)
1114 * Set the maxium size of memory space to be allocated for virtualization.
1116 * @param memsz Size of the virtual memory in GB
1117 * @return Return 0 on success or -1 on failure.
1119 int cvmx_l2c_vrt_set_max_memsz(int memsz)
/* Programs L2C_VRT_CTL[MEMSZ]; must be called before enabling
** virtualization and the size must fit in installed DRAM. */
1121 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1123 cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1126 l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1128 if (l2c_vrt_ctl.s.enable)
1130 cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Changing the size of the memory after Virtualization is enabled is not allowed.\n");
/* NOTE(review): memsz is in GB but system_dram_size/1000000 yields MB —
** this comparison looks off by a factor of ~1000; verify intent. */
1134 if (memsz >= (int)(cvmx_sysinfo_get()->system_dram_size / 1000000))
1136 cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), greater than available on the chip\n", memsz);
1140 decode = __cvmx_l2c_vrt_decode_memsize(memsz);
1143 cvmx_dprintf("WARNING: cvmx_l2c_vrt_set_memsz: Invalid memory size (%d GB), refer to L2C_VRT_CTL[MEMSZ] for more information\n", memsz);
1147 l2c_vrt_ctl.s.memsz = decode;
1148 cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1154 * Set a Virtual ID to a set of cores.
1156 * @param virtid Assign virtid to a set of cores.
1157 * @param coremask The group of cores to assign a unique virtual id.
1158 * @return Return 0 on success, otherwise -1.
1160 int cvmx_l2c_vrt_assign_virtid(int virtid, uint32_t coremask)
/* Writes @virtid into L2C_VIRTID_PPX for every core in @coremask and sets
** the matching IOB to normal mode. Warns (but proceeds) when a core
** already carries a different nonzero virtid. */
1164 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1167 int max_virtid = cvmx_l2c_vrt_get_max_virtids();
1169 if (virtid > max_virtid)
1171 cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Max %d number of virtids are allowed, passed %d.\n", max_virtid, virtid);
1175 while (core < cvmx_octeon_num_cores())
1177 if ((coremask >> core) & 1)
1179 cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1180 cvmx_l2c_virtid_iobx_t l2c_virtid_iobx;
1181 l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1183 /* Check if the core already has a virtid assigned. */
1184 if (l2c_virtid_ppx.s.id)
1186 cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Changing virtid of core #%d to %d from %d.\n",
1187 (unsigned int)core, virtid, l2c_virtid_ppx.s.id);
1189 /* Flush L2 cache to avoid write errors */
1192 cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), virtid & 0x3f);
1194 /* Set the IOB to normal mode. */
1195 l2c_virtid_iobx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_IOBX(core));
1196 l2c_virtid_iobx.s.id = 1;
1197 l2c_virtid_iobx.s.dwbid = 0;
1198 cvmx_write_csr(CVMX_L2C_VIRTID_IOBX(core), l2c_virtid_iobx.u64);
1204 /* Invalid coremask passed. */
1207 cvmx_dprintf("WARNING: cvmx_l2c_vrt_assign_virt_id: Invalid coremask(0x%x) passed\n", (unsigned int)coremask);
1215 * Remove a virt id assigned to a set of cores. Update the virtid mask and
1216 * virtid stored for each core.
1218 * @param virtid Remove the specified Virtualization machine ID.
1220 void cvmx_l2c_vrt_remove_virtid(int virtid)
/* Clears L2C_VIRTID_PPX for every core currently carrying @virtid. */
1222 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1225 cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1227 for (core = 0; core < cvmx_octeon_num_cores(); core++)
1229 l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1230 if (virtid == l2c_virtid_ppx.s.id)
1231 cvmx_write_csr(CVMX_L2C_VIRTID_PPX(core), 0);
1237 * Helper function to protect the memory region based on the granularity.
1239 static uint64_t __cvmx_l2c_vrt_get_granularity(void)
/* Granularity = (protected memory span * number of virtids) spread over
** the 32K protection bits provided by the L2C_VRT_MEM registers. */
1243 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1247 cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1249 l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1250 nvid = cvmx_l2c_vrt_get_max_virtids();
1251 szd = (1ull << l2c_vrt_ctl.s.memsz) * 1024 * 1024 * 1024;
1252 gran = (unsigned long long)(szd * nvid)/(32ull * 1024);
1258 * Block a memory region to be updated for a given virtual id.
1260 * @param start_addr Starting address of memory region
1261 * @param size Size of the memory to protect
1262 * @param virtid Virtual ID to use
1263 * @param mode Allow/Disallow write access
1264 * = 0, Allow write access by virtid
1265 * = 1, Disallow write access by virtid
1267 int cvmx_l2c_vrt_memprotect(uint64_t start_addr, int size, int virtid, int mode)
/* Sets/clears per-granule protection bits in L2C_VRT_MEMX for @virtid over
** [start_addr, start_addr+size). Requires virtualization to be enabled and
** @virtid to be assigned to at least one core. Bit index = granule number
** combined with virtid; parity is recomputed for each touched register. */
1269 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1271 /* Check the alignment of start address, should be aligned to the
1273 uint64_t gran = __cvmx_l2c_vrt_get_granularity();
1274 uint64_t end_addr = start_addr + size;
1275 int byte_offset, virtid_offset;
1276 cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1277 cvmx_l2c_vrt_memx_t l2c_vrt_mem;
1279 l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1281 /* No need to protect if virtualization is not enabled */
1282 if (!l2c_vrt_ctl.s.enable)
1284 cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization is not enabled.\n");
1288 if (virtid > cvmx_l2c_vrt_get_max_virtids())
1290 cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id is greater than max allowed\n");
1294 /* No need to protect if virtid is not assigned to a core */
1296 cvmx_l2c_virtid_ppx_t l2c_virtid_ppx;
1300 for (core = 0; core < cvmx_octeon_num_cores(); core++)
1302 l2c_virtid_ppx.u64 = cvmx_read_csr(CVMX_L2C_VIRTID_PPX(core));
1303 if (l2c_virtid_ppx.s.id == virtid)
1311 cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Virtualization id (%d) is not assigned to any core.\n", virtid);
1316 /* Make sure previous stores are through before protecting the memory. */
1319 /* If the L2/DRAM physical address is >= 512 MB, subtract 256 MB
1320 to get the address to use. This is because L2C removes the 256MB
1321 "hole" between DR0 and DR1. */
1322 if (start_addr >= (512 * 1024 * 1024))
1323 start_addr -= 256 * 1024 * 1024;
1325 if (start_addr != ((start_addr + (gran - 1)) & ~(gran - 1)))
1327 cvmx_dprintf("WARNING: cvmx_l2c_vrt_memprotect: Start address is not aligned\n");
1331 /* Check the size of the memory to protect, should be aligned to the
1333 if (end_addr != ((end_addr + (gran - 1)) & ~(gran - 1)))
1335 end_addr = (start_addr + (gran - 1)) & ~(gran - 1);
/* NOTE(review): `start_addr - end_addr` is negative/underflows here —
** expected `end_addr - start_addr`; verify against upstream SDK. */
1336 size = start_addr - end_addr;
1339 byte_offset = l2c_vrt_ctl.s.memsz + l2c_vrt_ctl.s.numid + 16;
1340 virtid_offset = 14 - l2c_vrt_ctl.s.numid;
1342 cvmx_spinlock_lock(&cvmx_l2c_vrt_spinlock);
1344 /* Enable memory protection for each virtid for the specified range. */
1345 while (start_addr < end_addr)
1347 /* When L2C virtualization is enabled and a bit is set in
1348 L2C_VRT_MEM(0..1023), then L2C prevents the selected virtual
1349 machine from storing to the selected L2C/DRAM region. */
1350 int offset, position, i;
1351 int l2c_vrt_mem_bit_index = start_addr >> byte_offset;
1352 l2c_vrt_mem_bit_index |= (virtid << virtid_offset);
1354 offset = l2c_vrt_mem_bit_index >> 5;
1355 position = l2c_vrt_mem_bit_index & 0x1f;
1357 l2c_vrt_mem.u64 = cvmx_read_csr(CVMX_L2C_VRT_MEMX(offset));
1358 /* Allow/Disallow write access to memory. */
1360 l2c_vrt_mem.s.data &= ~(1 << position);
1362 l2c_vrt_mem.s.data |= 1 << position;
1363 l2c_vrt_mem.s.parity = 0;
1364 /* PARITY<i> is the even parity of DATA<i*8+7:i*8>, which means
1365 that each bit<i> in PARITY[0..3], is the XOR of all the bits
1366 in the corresponding byte in DATA. */
/* NOTE(review): loop runs i = 0..4 (five iterations) while the comment
** above says PARITY[0..3]; the i == 4 pass masks bits above the data
** field — confirm whether `i <= 4` should be `i < 4`. */
1367 for (i = 0; i <= 4; i++)
1369 uint64_t mask = 0xffull << (i*8);
1370 if ((cvmx_pop(l2c_vrt_mem.s.data & mask) & 0x1))
1371 l2c_vrt_mem.s.parity |= (1ull << i);
1373 cvmx_write_csr(CVMX_L2C_VRT_MEMX(offset), l2c_vrt_mem.u64);
1377 cvmx_spinlock_unlock(&cvmx_l2c_vrt_spinlock);
1384 * Enable virtualization.
1386 * @param mode Whether out of bound writes are an error.
1388 void cvmx_l2c_vrt_enable(int mode)
/* Sets L2C_VRT_CTL[ENABLE] and OOBERR; call after NUMID/MEMSZ are set. */
1390 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1392 cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1394 /* Enable global virtualization */
1395 l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1396 l2c_vrt_ctl.s.ooberr = mode;
1397 l2c_vrt_ctl.s.enable = 1;
1398 cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);
1403 * Disable virtualization.
1405 void cvmx_l2c_vrt_disable(void)
/* Clears L2C_VRT_CTL[ENABLE]; protection bits in L2C_VRT_MEMX are left as-is. */
1407 if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1409 cvmx_l2c_vrt_ctl_t l2c_vrt_ctl;
1411 /* Disable global virtualization */
1412 l2c_vrt_ctl.u64 = cvmx_read_csr(CVMX_L2C_VRT_CTL);
1413 l2c_vrt_ctl.s.enable = 0;
1414 cvmx_write_csr(CVMX_L2C_VRT_CTL, l2c_vrt_ctl.u64);