/* sys/arm/arm/cpufunc.c (FreeBSD) */
/* ARM: For ARMv6/v7, code in locore.S initializes the SCTLR and ACTLR registers. */
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62
63 #if defined(CPU_XSCALE_81342)
64 #include <arm/xscale/i8134x/i81342reg.h>
65 #endif
66
67 #ifdef CPU_XSCALE_IXP425
68 #include <arm/xscale/ixp425/ixp425reg.h>
69 #include <arm/xscale/ixp425/ixp425var.h>
70 #endif
71
72 /* PRIMARY CACHE VARIABLES */
73 int     arm_picache_size;
74 int     arm_picache_line_size;
75 int     arm_picache_ways;
76
77 int     arm_pdcache_size;       /* and unified */
78 int     arm_pdcache_line_size;
79 int     arm_pdcache_ways;
80
81 int     arm_pcache_type;
82 int     arm_pcache_unified;
83
84 int     arm_dcache_align;
85 int     arm_dcache_align_mask;
86
87 u_int   arm_cache_level;
88 u_int   arm_cache_type[14];
89 u_int   arm_cache_loc;
90
91 #ifdef CPU_ARM9
92 struct cpu_functions arm9_cpufuncs = {
93         /* CPU functions */
94
95         cpufunc_nullop,                 /* cpwait               */
96
97         /* MMU functions */
98
99         cpufunc_control,                /* control              */
100         arm9_setttb,                    /* Setttb               */
101
102         /* TLB functions */
103
104         armv4_tlb_flushID,              /* tlb_flushID          */
105         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
106         armv4_tlb_flushD,               /* tlb_flushD           */
107         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
108
109         /* Cache operations */
110
111         arm9_icache_sync_range,         /* icache_sync_range    */
112
113         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
114         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
115         arm9_dcache_inv_range,          /* dcache_inv_range     */
116         arm9_dcache_wb_range,           /* dcache_wb_range      */
117
118         armv4_idcache_inv_all,          /* idcache_inv_all      */
119         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
120         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
121         cpufunc_nullop,                 /* l2cache_wbinv_all    */
122         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
123         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
124         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
125         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
126
127         /* Other functions */
128
129         armv4_drain_writebuf,           /* drain_writebuf       */
130
131         (void *)cpufunc_nullop,         /* sleep                */
132
133         /* Soft functions */
134
135         arm9_context_switch,            /* context_switch       */
136
137         arm9_setup                      /* cpu setup            */
138
139 };
140 #endif /* CPU_ARM9 */
141
142 #if defined(CPU_ARM9E)
143 struct cpu_functions armv5_ec_cpufuncs = {
144         /* CPU functions */
145
146         cpufunc_nullop,                 /* cpwait               */
147
148         /* MMU functions */
149
150         cpufunc_control,                /* control              */
151         armv5_ec_setttb,                /* Setttb               */
152
153         /* TLB functions */
154
155         armv4_tlb_flushID,              /* tlb_flushID          */
156         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
157         armv4_tlb_flushD,               /* tlb_flushD           */
158         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
159
160         /* Cache operations */
161
162         armv5_ec_icache_sync_range,     /* icache_sync_range    */
163
164         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
165         armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
166         armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
167         armv5_ec_dcache_wb_range,       /* dcache_wb_range      */
168
169         armv4_idcache_inv_all,          /* idcache_inv_all      */
170         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
171         armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */
172
173         cpufunc_nullop,                 /* l2cache_wbinv_all    */
174         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
175         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
176         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
177         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
178
179         /* Other functions */
180
181         armv4_drain_writebuf,           /* drain_writebuf       */
182
183         (void *)cpufunc_nullop,         /* sleep                */
184
185         /* Soft functions */
186
187         arm9_context_switch,            /* context_switch       */
188
189         arm10_setup                     /* cpu setup            */
190
191 };
192
193 struct cpu_functions sheeva_cpufuncs = {
194         /* CPU functions */
195
196         cpufunc_nullop,                 /* cpwait               */
197
198         /* MMU functions */
199
200         cpufunc_control,                /* control              */
201         sheeva_setttb,                  /* Setttb               */
202
203         /* TLB functions */
204
205         armv4_tlb_flushID,              /* tlb_flushID          */
206         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
207         armv4_tlb_flushD,               /* tlb_flushD           */
208         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
209
210         /* Cache operations */
211
212         armv5_ec_icache_sync_range,     /* icache_sync_range    */
213
214         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
215         sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
216         sheeva_dcache_inv_range,        /* dcache_inv_range     */
217         sheeva_dcache_wb_range,         /* dcache_wb_range      */
218
219         armv4_idcache_inv_all,          /* idcache_inv_all      */
220         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
221         sheeva_idcache_wbinv_range,     /* idcache_wbinv_all    */
222
223         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
224         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
225         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
226         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
227         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
228
229         /* Other functions */
230
231         armv4_drain_writebuf,           /* drain_writebuf       */
232
233         sheeva_cpu_sleep,               /* sleep                */
234
235         /* Soft functions */
236
237         arm9_context_switch,            /* context_switch       */
238
239         arm10_setup                     /* cpu setup            */
240 };
241 #endif /* CPU_ARM9E */
242
243 #ifdef CPU_MV_PJ4B
244 struct cpu_functions pj4bv7_cpufuncs = {
245         /* CPU functions */
246
247         armv7_drain_writebuf,           /* cpwait               */
248
249         /* MMU functions */
250
251         cpufunc_control,                /* control              */
252         armv7_setttb,                   /* Setttb               */
253
254         /* TLB functions */
255
256         armv7_tlb_flushID,              /* tlb_flushID          */
257         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
258         armv7_tlb_flushID,              /* tlb_flushD           */
259         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
260
261         /* Cache operations */
262         armv7_icache_sync_range,        /* icache_sync_range    */
263
264         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
265         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
266         armv7_dcache_inv_range,         /* dcache_inv_range     */
267         armv7_dcache_wb_range,          /* dcache_wb_range      */
268
269         armv7_idcache_inv_all,          /* idcache_inv_all      */
270         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
271         armv7_idcache_wbinv_range,      /* idcache_wbinv_all    */
272
273         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
274         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
275         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
276         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
277         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
278
279         /* Other functions */
280
281         armv7_drain_writebuf,           /* drain_writebuf       */
282
283         (void *)cpufunc_nullop,         /* sleep                */
284
285         /* Soft functions */
286         armv7_context_switch,           /* context_switch       */
287
288         pj4bv7_setup                    /* cpu setup            */
289 };
290 #endif /* CPU_MV_PJ4B */
291
292 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
293
294 struct cpu_functions xscale_cpufuncs = {
295         /* CPU functions */
296
297         xscale_cpwait,                  /* cpwait               */
298
299         /* MMU functions */
300
301         xscale_control,                 /* control              */
302         xscale_setttb,                  /* setttb               */
303
304         /* TLB functions */
305
306         armv4_tlb_flushID,              /* tlb_flushID          */
307         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
308         armv4_tlb_flushD,               /* tlb_flushD           */
309         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
310
311         /* Cache operations */
312
313         xscale_cache_syncI_rng,         /* icache_sync_range    */
314
315         xscale_cache_purgeD,            /* dcache_wbinv_all     */
316         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
317         xscale_cache_flushD_rng,        /* dcache_inv_range     */
318         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
319
320         xscale_cache_flushID,           /* idcache_inv_all      */
321         xscale_cache_purgeID,           /* idcache_wbinv_all    */
322         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
323         cpufunc_nullop,                 /* l2cache_wbinv_all    */
324         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
325         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
326         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
327         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
328
329         /* Other functions */
330
331         armv4_drain_writebuf,           /* drain_writebuf       */
332
333         xscale_cpu_sleep,               /* sleep                */
334
335         /* Soft functions */
336
337         xscale_context_switch,          /* context_switch       */
338
339         xscale_setup                    /* cpu setup            */
340 };
341 #endif
342 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
343
344 #ifdef CPU_XSCALE_81342
345 struct cpu_functions xscalec3_cpufuncs = {
346         /* CPU functions */
347
348         xscale_cpwait,                  /* cpwait               */
349
350         /* MMU functions */
351
352         xscale_control,                 /* control              */
353         xscalec3_setttb,                /* setttb               */
354
355         /* TLB functions */
356
357         armv4_tlb_flushID,              /* tlb_flushID          */
358         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
359         armv4_tlb_flushD,               /* tlb_flushD           */
360         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
361
362         /* Cache operations */
363
364         xscalec3_cache_syncI_rng,       /* icache_sync_range    */
365
366         xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
367         xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
368         xscale_cache_flushD_rng,        /* dcache_inv_range     */
369         xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */
370
371         xscale_cache_flushID,           /* idcache_inv_all      */
372         xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
373         xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
374         xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
375         xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
376         xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
377         xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
378         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
379
380         /* Other functions */
381
382         armv4_drain_writebuf,           /* drain_writebuf       */
383
384         xscale_cpu_sleep,               /* sleep                */
385
386         /* Soft functions */
387
388         xscalec3_context_switch,        /* context_switch       */
389
390         xscale_setup                    /* cpu setup            */
391 };
392 #endif /* CPU_XSCALE_81342 */
393
394
395 #if defined(CPU_FA526)
396 struct cpu_functions fa526_cpufuncs = {
397         /* CPU functions */
398
399         cpufunc_nullop,                 /* cpwait               */
400
401         /* MMU functions */
402
403         cpufunc_control,                /* control              */
404         fa526_setttb,                   /* setttb               */
405
406         /* TLB functions */
407
408         armv4_tlb_flushID,              /* tlb_flushID          */
409         fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
410         armv4_tlb_flushD,               /* tlb_flushD           */
411         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
412
413         /* Cache operations */
414
415         fa526_icache_sync_range,        /* icache_sync_range    */
416
417         fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
418         fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
419         fa526_dcache_inv_range,         /* dcache_inv_range     */
420         fa526_dcache_wb_range,          /* dcache_wb_range      */
421
422         armv4_idcache_inv_all,          /* idcache_inv_all      */
423         fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
424         fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
425         cpufunc_nullop,                 /* l2cache_wbinv_all    */
426         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
427         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
428         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
429         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
430
431         /* Other functions */
432
433         armv4_drain_writebuf,           /* drain_writebuf       */
434
435         fa526_cpu_sleep,                /* sleep                */
436
437         /* Soft functions */
438
439
440         fa526_context_switch,           /* context_switch       */
441
442         fa526_setup                     /* cpu setup            */
443 };
444 #endif  /* CPU_FA526 */
445
446 #if defined(CPU_ARM1176)
447 struct cpu_functions arm1176_cpufuncs = {
448         /* CPU functions */
449
450         cpufunc_nullop,                 /* cpwait               */
451
452         /* MMU functions */
453
454         cpufunc_control,                /* control              */
455         arm11x6_setttb,                 /* Setttb               */
456
457         /* TLB functions */
458
459         arm11_tlb_flushID,              /* tlb_flushID          */
460         arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
461         arm11_tlb_flushD,               /* tlb_flushD           */
462         arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
463
464         /* Cache operations */
465
466         arm11x6_icache_sync_range,      /* icache_sync_range    */
467
468         arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
469         armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
470         armv6_dcache_inv_range,         /* dcache_inv_range     */
471         armv6_dcache_wb_range,          /* dcache_wb_range      */
472
473         armv6_idcache_inv_all,          /* idcache_inv_all      */
474         arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
475         arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
476
477         (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
478         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
479         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
480         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
481         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
482
483         /* Other functions */
484
485         arm11_drain_writebuf,           /* drain_writebuf       */
486
487         arm11x6_sleep,                  /* sleep                */
488
489         /* Soft functions */
490
491         arm11_context_switch,           /* context_switch       */
492
493         arm11x6_setup                   /* cpu setup            */
494 };
495 #endif /*CPU_ARM1176 */
496
497 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
498 struct cpu_functions cortexa_cpufuncs = {
499         /* CPU functions */
500
501         cpufunc_nullop,                 /* cpwait               */
502
503         /* MMU functions */
504
505         cpufunc_control,                /* control              */
506         armv7_setttb,                   /* Setttb               */
507
508         /*
509          * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
510          * whether the hardware implements separate I+D or not, so we use the
511          * same 'ID' functions for all 3 variations.
512          */
513
514         armv7_tlb_flushID,              /* tlb_flushID          */
515         armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
516         armv7_tlb_flushID,              /* tlb_flushD           */
517         armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
518
519         /* Cache operations */
520
521         armv7_icache_sync_range,        /* icache_sync_range    */
522
523         armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
524         armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
525         armv7_dcache_inv_range,         /* dcache_inv_range     */
526         armv7_dcache_wb_range,          /* dcache_wb_range      */
527
528         armv7_idcache_inv_all,          /* idcache_inv_all      */
529         armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
530         armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
531
532         /*
533          * Note: For CPUs using the PL310 the L2 ops are filled in when the
534          * L2 cache controller is actually enabled.
535          */
536         cpufunc_nullop,                 /* l2cache_wbinv_all    */
537         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
538         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
539         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
540         (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
541
542         /* Other functions */
543
544         armv7_drain_writebuf,           /* drain_writebuf       */
545
546         armv7_cpu_sleep,                /* sleep                */
547
548         /* Soft functions */
549
550         armv7_context_switch,           /* context_switch       */
551
552         cortexa_setup                     /* cpu setup            */
553 };
554 #endif /* CPU_CORTEXA */
555
556 /*
557  * Global constants also used by locore.s
558  */
559
560 struct cpu_functions cpufuncs;
561 u_int cputype;
562 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
563
564 #if defined(CPU_ARM9) ||        \
565   defined (CPU_ARM9E) ||        \
566   defined(CPU_ARM1176) ||       \
567   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
568   defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||                 \
569   defined(CPU_XSCALE_81342) || \
570   defined(CPU_CORTEXA) || defined(CPU_KRAIT)
571
572 /* Global cache line sizes, use 32 as default */
573 int     arm_dcache_min_line_size = 32;
574 int     arm_icache_min_line_size = 32;
575 int     arm_idcache_min_line_size = 32;
576
577 static void get_cachetype_cp15(void);
578
579 /* Additional cache information local to this file.  Log2 of some of the
580    above numbers.  */
581 static int      arm_dcache_l2_nsets;
582 static int      arm_dcache_l2_assoc;
583 static int      arm_dcache_l2_linesize;
584
585 static void
586 get_cachetype_cp15()
587 {
588         u_int ctype, isize, dsize, cpuid;
589         u_int clevel, csize, i, sel;
590         u_int multiplier;
591         u_char type;
592
593         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
594                 : "=r" (ctype));
595
596         cpuid = cpu_ident();
597         /*
598          * ...and thus spake the ARM ARM:
599          *
600          * If an <opcode2> value corresponding to an unimplemented or
601          * reserved ID register is encountered, the System Control
602          * processor returns the value of the main ID register.
603          */
604         if (ctype == cpuid)
605                 goto out;
606
607         if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
608                 /* Resolve minimal cache line sizes */
609                 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
610                 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
611                 arm_idcache_min_line_size =
612                     min(arm_icache_min_line_size, arm_dcache_min_line_size);
613
614                 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
615                     : "=r" (clevel));
616                 arm_cache_level = clevel;
617                 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
618                 i = 0;
619                 while ((type = (clevel & 0x7)) && i < 7) {
620                         if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
621                             type == CACHE_SEP_CACHE) {
622                                 sel = i << 1;
623                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
624                                     : : "r" (sel));
625                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
626                                     : "=r" (csize));
627                                 arm_cache_type[sel] = csize;
628                                 arm_dcache_align = 1 <<
629                                     (CPUV7_CT_xSIZE_LEN(csize) + 4);
630                                 arm_dcache_align_mask = arm_dcache_align - 1;
631                         }
632                         if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
633                                 sel = (i << 1) | 1;
634                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
635                                     : : "r" (sel));
636                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
637                                     : "=r" (csize));
638                                 arm_cache_type[sel] = csize;
639                         }
640                         i++;
641                         clevel >>= 3;
642                 }
643         } else {
644                 if ((ctype & CPU_CT_S) == 0)
645                         arm_pcache_unified = 1;
646
647                 /*
648                  * If you want to know how this code works, go read the ARM ARM.
649                  */
650
651                 arm_pcache_type = CPU_CT_CTYPE(ctype);
652
653                 if (arm_pcache_unified == 0) {
654                         isize = CPU_CT_ISIZE(ctype);
655                         multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
656                         arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
657                         if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
658                                 if (isize & CPU_CT_xSIZE_M)
659                                         arm_picache_line_size = 0; /* not present */
660                                 else
661                                         arm_picache_ways = 1;
662                         } else {
663                                 arm_picache_ways = multiplier <<
664                                     (CPU_CT_xSIZE_ASSOC(isize) - 1);
665                         }
666                         arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
667                 }
668
669                 dsize = CPU_CT_DSIZE(ctype);
670                 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
671                 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
672                 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
673                         if (dsize & CPU_CT_xSIZE_M)
674                                 arm_pdcache_line_size = 0; /* not present */
675                         else
676                                 arm_pdcache_ways = 1;
677                 } else {
678                         arm_pdcache_ways = multiplier <<
679                             (CPU_CT_xSIZE_ASSOC(dsize) - 1);
680                 }
681                 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
682
683                 arm_dcache_align = arm_pdcache_line_size;
684
685                 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
686                 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
687                 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
688                     CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
689
690         out:
691                 arm_dcache_align_mask = arm_dcache_align - 1;
692         }
693 }
694 #endif /* ARM9 || XSCALE */
695
696 /*
697  * Cannot panic here as we may not have a console yet ...
698  */
699
700 int
701 set_cpufuncs()
702 {
703         cputype = cpu_ident();
704         cputype &= CPU_ID_CPU_MASK;
705
706 #ifdef CPU_ARM9
707         if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
708              (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
709             (cputype & 0x0000f000) == 0x00009000) {
710                 cpufuncs = arm9_cpufuncs;
711                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
712                 get_cachetype_cp15();
713                 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
714                 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
715                     arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
716                 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
717                 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
718                 pmap_pte_init_generic();
719                 goto out;
720         }
721 #endif /* CPU_ARM9 */
722 #if defined(CPU_ARM9E)
723         if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
724             cputype == CPU_ID_MV88FR571_41) {
725                 uint32_t sheeva_ctrl;
726
727                 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
728                     MV_L2_ENABLE);
729                 /*
730                  * Workaround for Marvell MV78100 CPU: Cache prefetch
731                  * mechanism may affect the cache coherency validity,
732                  * so it needs to be disabled.
733                  *
734                  * Refer to errata document MV-S501058-00C.pdf (p. 3.1
735                  * L2 Prefetching Mechanism) for details.
736                  */
737                 if (cputype == CPU_ID_MV88FR571_VD ||
738                     cputype == CPU_ID_MV88FR571_41)
739                         sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
740
741                 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
742
743                 cpufuncs = sheeva_cpufuncs;
744                 get_cachetype_cp15();
745                 pmap_pte_init_generic();
746                 goto out;
747         } else if (cputype == CPU_ID_ARM926EJS) {
748                 cpufuncs = armv5_ec_cpufuncs;
749                 get_cachetype_cp15();
750                 pmap_pte_init_generic();
751                 goto out;
752         }
753 #endif /* CPU_ARM9E */
754 #if defined(CPU_ARM1176)
755         if (cputype == CPU_ID_ARM1176JZS) {
756                 cpufuncs = arm1176_cpufuncs;
757                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
758                 get_cachetype_cp15();
759                 goto out;
760         }
761 #endif /* CPU_ARM1176 */
762 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
763         if (cputype == CPU_ID_CORTEXA5 ||
764             cputype == CPU_ID_CORTEXA7 ||
765             cputype == CPU_ID_CORTEXA8R1 ||
766             cputype == CPU_ID_CORTEXA8R2 ||
767             cputype == CPU_ID_CORTEXA8R3 ||
768             cputype == CPU_ID_CORTEXA9R1 ||
769             cputype == CPU_ID_CORTEXA9R2 ||
770             cputype == CPU_ID_CORTEXA9R3 ||
771             cputype == CPU_ID_CORTEXA9R4 ||
772             cputype == CPU_ID_CORTEXA12R0 ||
773             cputype == CPU_ID_CORTEXA15R0 ||
774             cputype == CPU_ID_CORTEXA15R1 ||
775             cputype == CPU_ID_CORTEXA15R2 ||
776             cputype == CPU_ID_CORTEXA15R3 ||
777             cputype == CPU_ID_KRAIT300R0 ||
778             cputype == CPU_ID_KRAIT300R1 ) {
779                 cpufuncs = cortexa_cpufuncs;
780                 cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
781                 get_cachetype_cp15();
782                 goto out;
783         }
#endif /* CPU_CORTEXA || CPU_KRAIT */
785
786 #if defined(CPU_MV_PJ4B)
787         if (cputype == CPU_ID_MV88SV581X_V7 ||
788             cputype == CPU_ID_MV88SV584X_V7 ||
789             cputype == CPU_ID_ARM_88SV581X_V7) {
790                 cpufuncs = pj4bv7_cpufuncs;
791                 get_cachetype_cp15();
792                 goto out;
793         }
794 #endif /* CPU_MV_PJ4B */
795
796 #if defined(CPU_FA526)
797         if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
798                 cpufuncs = fa526_cpufuncs;
799                 cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
800                 get_cachetype_cp15();
801                 pmap_pte_init_generic();
802
803                 goto out;
804         }
805 #endif  /* CPU_FA526 */
806
807 #if defined(CPU_XSCALE_81342)
808         if (cputype == CPU_ID_81342) {
809                 cpufuncs = xscalec3_cpufuncs;
810                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
811                 get_cachetype_cp15();
812                 pmap_pte_init_xscale();
813                 goto out;
814         }
815 #endif /* CPU_XSCALE_81342 */
816 #ifdef CPU_XSCALE_PXA2X0
817         /* ignore core revision to test PXA2xx CPUs */
818         if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
819             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
820             (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
821
822                 cpufuncs = xscale_cpufuncs;
823                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
824                 get_cachetype_cp15();
825                 pmap_pte_init_xscale();
826
827                 goto out;
828         }
829 #endif /* CPU_XSCALE_PXA2X0 */
830 #ifdef CPU_XSCALE_IXP425
831         if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
832             cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
833
834                 cpufuncs = xscale_cpufuncs;
835                 cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
836                 get_cachetype_cp15();
837                 pmap_pte_init_xscale();
838
839                 goto out;
840         }
841 #endif /* CPU_XSCALE_IXP425 */
        /*
         * None of the known CPU IDs matched: this CPU type is not
         * supported by the kernel configuration.
         */
845         panic("No support for this CPU type (%08x) in kernel", cputype);
846         return(ARCHITECTURE_NOT_PRESENT);
847 out:
848         uma_set_align(arm_dcache_align_mask);
849         return (0);
850 }
851
852 /*
853  * CPU Setup code
854  */
855
856 #ifdef CPU_ARM9
857 void
858 arm9_setup(void)
859 {
860         int cpuctrl, cpuctrlmask;
861
862         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
863             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
864             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
865             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
866             CPU_CONTROL_ROUNDROBIN;
867         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
868                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
869                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
870                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
871                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
872                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
873                  | CPU_CONTROL_ROUNDROBIN;
874
875 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
876         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
877 #endif
878
879 #ifdef __ARMEB__
880         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
881 #endif
882         if (vector_page == ARM_VECTORS_HIGH)
883                 cpuctrl |= CPU_CONTROL_VECRELOC;
884
885         /* Clear out the cache */
886         cpu_idcache_wbinv_all();
887
888         /* Set the control register */
889         cpu_control(cpuctrlmask, cpuctrl);
890
891 }
892 #endif  /* CPU_ARM9 */
893
894 #if defined(CPU_ARM9E)
895 void
896 arm10_setup(void)
897 {
898         int cpuctrl, cpuctrlmask;
899
900         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
901             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
902             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
903         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
904             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
905             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
906             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
907             | CPU_CONTROL_BPRD_ENABLE
908             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
909
910 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
911         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
912 #endif
913
914 #ifdef __ARMEB__
915         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
916 #endif
917
918         /* Clear out the cache */
919         cpu_idcache_wbinv_all();
920
921         /* Now really make sure they are clean.  */
922         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
923
924         if (vector_page == ARM_VECTORS_HIGH)
925                 cpuctrl |= CPU_CONTROL_VECRELOC;
926
927         /* Set the control register */
928         cpu_control(0xffffffff, cpuctrl);
929
930         /* And again. */
931         cpu_idcache_wbinv_all();
932 }
#endif  /* CPU_ARM9E */
934
935 #if defined(CPU_ARM1176) \
936  || defined(CPU_MV_PJ4B) \
937  || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
	/*
	 * Configure the performance monitors so the cycle counter
	 * (CCNT) runs.  Optionally expose the CCNT and PMCn registers
	 * to userland.  BEWARE: doing so grants WRITE access as well,
	 * which may not be what you want!
	 */
#ifdef _PMC_USER_READ_WRITE_
	/* PMUSERENR[0] = 1: allow user-mode access to the PMU. */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* PMCR[2,0]: enable the counters and reset CCNT. */
	cp15_pmcr_set(5);
#else
	/*
	 * Set up PMCCNTR as a free-running cycle counter:
	 * mask all PMU interrupts (PMINTENCLR = 0xFFFFFFFF),
	 * enable the counters and reset CCNT (PMCR[2,0]),
	 * then switch CCNT on (PMCNTENSET[31]).
	 */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
963 #endif
964
965 #if defined(CPU_ARM1176)
966 void
967 arm11x6_setup(void)
968 {
969         uint32_t auxctrl, auxctrl_wax;
970         uint32_t tmp, tmp2;
971         uint32_t cpuid;
972
973         cpuid = cpu_ident();
974
975         auxctrl = 0;
976         auxctrl_wax = ~0;
977
978         /*
979          * Enable an errata workaround
980          */
981         if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
982                 auxctrl = ARM1176_AUXCTL_PHD;
983                 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
984         }
985
986         tmp = cp15_actlr_get();
987         tmp2 = tmp;
988         tmp &= auxctrl_wax;
989         tmp |= auxctrl;
990         if (tmp != tmp2)
991                 cp15_actlr_set(tmp);
992
993         cpu_scc_setup_ccnt();
994 }
995 #endif  /* CPU_ARM1176 */
996
997 #ifdef CPU_MV_PJ4B
void
pj4bv7_setup(void)
{

	/*
	 * Apply the Marvell PJ4B core configuration first, then set up
	 * the cycle counter via the shared PMU initialization helper.
	 */
	pj4b_config();
	cpu_scc_setup_ccnt();
}
1005 #endif /* CPU_MV_PJ4B */
1006
1007 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1008
void
cortexa_setup(void)
{

	/*
	 * Only the cycle-counter/PMU initialization is done here; the
	 * SCTLR/ACTLR setup for ARMv7 cores happens in locore.S.
	 */
	cpu_scc_setup_ccnt();
}
1015 #endif  /* CPU_CORTEXA */
1016
1017 #if defined(CPU_FA526)
1018 void
1019 fa526_setup(void)
1020 {
1021         int cpuctrl, cpuctrlmask;
1022
1023         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1024                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1025                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1026                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1027                 | CPU_CONTROL_BPRD_ENABLE;
1028         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1029                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1030                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1031                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1032                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1033                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1034                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1035
1036 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1037         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1038 #endif
1039
1040 #ifdef __ARMEB__
1041         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1042 #endif
1043
1044         if (vector_page == ARM_VECTORS_HIGH)
1045                 cpuctrl |= CPU_CONTROL_VECRELOC;
1046
1047         /* Clear out the cache */
1048         cpu_idcache_wbinv_all();
1049
1050         /* Set the control register */
1051         cpu_control(0xffffffff, cpuctrl);
1052 }
1053 #endif  /* CPU_FA526 */
1054
1055 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1056   defined(CPU_XSCALE_81342)
/*
 * Configure an XScale core: set the CP15 control register and tune
 * write-buffer coalescing (and, on core 3, L2/low-locality settings)
 * via the auxiliary control register.
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	/*
	 * NOTE(review): cpuctrlmask is computed but currently unused --
	 * the cpu_control() call below uses a full 0xffffffff mask (see
	 * the commented-out alternative further down).
	 */
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors to 0xffff0000 if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	/* Read the auxiliary control register (CP15 reg 1, opcode2 1). */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	/* K bit set: write-buffer coalescing disabled. */
	auxctl |= XSCALE_AUXCTL_K;
#else
	/* K bit clear: allow the write buffer to coalesce stores. */
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* Core 3: enable low-locality reference and memory-attribute bits. */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	/* Write the updated auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif  /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */