]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
Remove remaining support of big endian byte order.
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * arm9 support code Copyright (C) 2001 ARM Ltd
7  * Copyright (c) 1997 Mark Brinicombe.
8  * Copyright (c) 1997 Causality Limited
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *      This product includes software developed by Causality Limited.
22  * 4. The name of Causality Limited may not be used to endorse or promote
23  *    products derived from this software without specific prior written
24  *    permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * RiscBSD kernel project
39  *
40  * cpufuncs.c
41  *
42  * C functions for supporting CPU / MMU / TLB specific operations.
43  *
44  * Created      : 30/01/97
45  */
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/bus.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
57
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 #include <vm/uma.h>
61
62 #include <machine/cpufunc.h>
63
/* PRIMARY CACHE VARIABLES */

/* L1 instruction cache geometry: total bytes, bytes per line, ways. */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* L1 data (or unified) cache geometry. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

/* Cache type / unified flag decoded from the CP15 cache type register. */
int	arm_pcache_type;
int	arm_pcache_unified;

/* D-cache line size and line mask, used to align cache maintenance ops. */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/* Raw CLIDR / per-level CCSIDR values captured on ARMv7 cores by
 * get_cachetype_cp15(); indexed by (level << 1) | is-icache. */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;
82
/*
 * Forward declarations of the per-CPU-family setup routines; each is
 * compiled in only when the matching CPU type is configured.
 */
#if defined(CPU_ARM9E)
static void arm10_setup(void);
#endif
#ifdef CPU_MV_PJ4B
static void pj4bv7_setup(void);
#endif
#if defined(CPU_ARM1176)
static void arm11x6_setup(void);
#endif
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#endif
95
96 #if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5 "EC" (ARM9E-family, e.g. ARM926EJ-S) cores.
 * Entries are positional and must match the member order of
 * struct cpu_functions.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No L2 cache on these cores: all L2 ops are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
146
/*
 * Dispatch table for Marvell Sheeva (Feroceon 88FR131/88FR571) cores,
 * which supply their own D-cache range and L2 maintenance routines on
 * top of the generic ARMv4/ARMv5 primitives.  Entries are positional
 * and must match the member order of struct cpu_functions.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
195 #endif /* CPU_ARM9E */
196
197 #ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores.  Only the members
 * named here are set; all unnamed members of this designated
 * initializer are zero-initialized per C rules.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */
	.cf_setup = pj4bv7_setup
};
212 #endif /* CPU_MV_PJ4B */
213
214 #if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 cores.  Only the members named here are
 * set; all unnamed members of this designated initializer are
 * zero-initialized per C rules.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	/* Soft functions */
	.cf_setup = arm11x6_setup
};
#endif /* CPU_ARM1176 */
230
231 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Qualcomm Krait cores.  Only the
 * members named here are set; all unnamed members of this designated
 * initializer are zero-initialized per C rules.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */
	.cf_setup = cortexa_setup
};
251 #endif /* CPU_CORTEXA || CPU_KRAIT */
252
/*
 * Global constants also used by locore.s
 */

/* Active dispatch table; installed by set_cpufuncs() at boot. */
struct cpu_functions cpufuncs;
/* MIDR of the boot CPU, masked with CPU_ID_CPU_MASK by set_cpufuncs(). */
u_int cputype;
259
#if defined (CPU_ARM9E) ||	\
  defined(CPU_ARM1176) ||	\
  defined(CPU_MV_PJ4B) ||			\
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

/*
 * Probe the CP15 cache identification registers and fill in the
 * arm_*cache* globals above.  Supports both the ARMv7 CLIDR/CCSIDR
 * scheme and the older pre-v7 cache type register layout.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	ctype = cp15_ctr_get();
	cpuid = cp15_midr_get();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Read CLIDR to learn the cache hierarchy. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		/* Walk CLIDR's 3-bit per-level cache type fields. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select the D/unified cache at this level
				 * (CSSELR), then read its size ID (CCSIDR). */
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/* Last level probed wins for line size. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/* Pre-v7 cache type register layout. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

		/*
		 * NOTE(review): the "goto out" above (ctype == cpuid case)
		 * jumps into this else-branch, so the mask is then derived
		 * from whatever arm_dcache_align already holds.
		 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* CPU_ARM9E || CPU_ARM1176 || CPU_MV_PJ4B || CPU_CORTEXA || CPU_KRAIT */
375
/*
 * Cannot panic here as we may not have a console yet ...
 */

/*
 * Identify the CPU from its (masked) MIDR value, install the matching
 * cpu_functions dispatch table, probe the cache geometry, and tell UMA
 * the D-cache alignment.  Returns 0 on success; panics on an
 * unsupported CPU type.
 */
int
set_cpufuncs(void)
{
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;

#if defined(CPU_ARM9E)
	/* Marvell Sheeva (Feroceon) cores need extra extended-control setup. */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* Cortex-A family and Krait share the same dispatch table. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
		goto out;
	default:
		break;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* not reached; keeps compiler happy */
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
464
465 /*
466  * CPU Setup code
467  */
468
#if defined(CPU_ARM9E)
/*
 * CPU setup for ARM9E-family cores (also used by Sheeva): enable MMU,
 * caches, write buffer and branch prediction (plus alignment faults
 * and high vectors where configured), flushing the I/D caches around
 * the control-register update.
 */
static void
arm10_setup(void)
{
	int cpuctrl, cpuctrlmask;

	/* Bits we want set in the control register. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but never used below;
	 * cpu_control() is called with a full 0xffffffff mask instead.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif


	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E */
506
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Enable the CP15 performance-monitor cycle counter (CCNT), and
 * optionally open it up to userland.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif
536
537 #if defined(CPU_ARM1176)
538 static void
539 arm11x6_setup(void)
540 {
541         uint32_t auxctrl, auxctrl_wax;
542         uint32_t tmp, tmp2;
543         uint32_t cpuid;
544
545         cpuid = cp15_midr_get();
546
547         auxctrl = 0;
548         auxctrl_wax = ~0;
549
550         /*
551          * Enable an errata workaround
552          */
553         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
554                 auxctrl = ARM1176_AUXCTL_PHD;
555                 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
556         }
557
558         tmp = cp15_actlr_get();
559         tmp2 = tmp;
560         tmp &= auxctrl_wax;
561         tmp |= auxctrl;
562         if (tmp != tmp2)
563                 cp15_actlr_set(tmp);
564
565         cpu_scc_setup_ccnt();
566 }
567 #endif  /* CPU_ARM1176 */
568
569 #ifdef CPU_MV_PJ4B
/*
 * Per-core initialization for Marvell PJ4B: apply the PJ4B-specific
 * configuration, then enable the cycle counter.
 */
static void
pj4bv7_setup(void)
{
	pj4b_config();
	cpu_scc_setup_ccnt();
}
577 #endif /* CPU_MV_PJ4B */
578
579 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Per-core initialization for Cortex-A / Krait: nothing to configure
 * here beyond enabling the cycle counter.
 */
static void
cortexa_setup(void)
{
	cpu_scc_setup_ccnt();
}
586 #endif  /* CPU_CORTEXA || CPU_KRAIT */