2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2008 Marcel Moolenaar
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
40 #include <machine/bus.h>
41 #include <machine/cpu.h>
42 #include <machine/hid.h>
43 #include <machine/intr_machdep.h>
44 #include <machine/pcb.h>
45 #include <machine/psl.h>
46 #include <machine/smp.h>
47 #include <machine/spr.h>
48 #include <machine/trap.h>
50 #include <dev/ofw/openfirm.h>
51 #include <machine/ofw_machdep.h>
/*
 * Saved boot-processor (BSP) special-register state.  APs restore from
 * these slots when they are brought up, so they come online with a
 * configuration matching the BSP.  8-byte alignment lets the 64-bit
 * "ld"-based restore sequences address the array directly.
 */
55 static register_t bsp_state[8] __aligned(8);

57 static void cpudep_save_config(void *dummy);
/* Snapshot the BSP's configuration registers during CPU bring-up. */
58 SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);
/*
 * Early AP bootstrap, run before the AP is fully set up.  Restores the
 * HID registers the MMU depends on and publishes the per-CPU pointer.
 * NOTE(review): this listing is elided — the local declarations, the
 * switch case labels, and the closing braces between the numbered lines
 * are not visible in this fragment.
 */
61 cpudep_ap_early_bootstrap(void)
/* Dispatch on the CPU family (upper half of the PVR). */
67 switch (mfpvr() >> 16) {
71 /* Restore HID4 and HID5, which are necessary for the MMU */
/* 32-bit path: plain mtspr, fenced with powerpc_sync()/isync(). */
74 mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
75 mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
/*
 * 64-bit path: load the saved doublewords with "ld" straight from
 * bsp_state (offsets 16/24 = slots 2/3) so the full 64-bit value
 * reaches the SPR, with sync/isync fences around each mtspr.
 */
77 __asm __volatile("ld %0, 16(%2); sync; isync; \
78 mtspr %1, %0; sync; isync;"
79 : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
80 __asm __volatile("ld %0, 24(%2); sync; isync; \
81 mtspr %1, %0; sync; isync;"
82 : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
/* Hypervisor mode only: adjust the logical-partitioning control. */
90 if (mfmsr() & PSL_HV) {
93 * Direct interrupts to SRR instead of HSRR and
94 * reset LPCR otherwise
99 mtspr(SPR_LPCR, lpcr);
103 * Nuke FSCR, to be managed on a per-process basis
/*
 * Stash this AP's pcpu pointer in SPRG0 — presumably consumed by the
 * low-level trap/exception code; confirm against the full source.
 */
112 __asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
/*
 * Per-AP bootstrap: install the idle thread as curthread/curpcb and pick
 * up the stack pointer saved in its PCB.  NOTE(review): elided listing —
 * the declarations of msr/sp/pcpup, the mtmsr, and the final return are
 * not visible in this fragment.
 */
117 cpudep_ap_bootstrap(void)
/* Kernel MSR with external interrupts (PSL_EE) masked for now. */
121 msr = psl_kernset & ~PSL_EE;
/* This CPU starts out running its per-CPU idle thread. */
124 pcpup->pc_curthread = pcpup->pc_idlethread;
/* Mirror curthread into r13 (one ABI's curthread register) ... */
126 __asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
/* ... and into r2 on the other path. */
128 __asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
130 pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
/* Stack pointer for the caller comes from the idle thread's PCB. */
131 sp = pcpup->pc_curpcb->pcb_sp;
/*
 * Program and enable the MPC74xx L2 cache from the given L2CR value and
 * return the value programmed.  NOTE(review): elided listing — local
 * declarations, intermediate branches, and any wait/poll loops between
 * the numbered lines are not visible here.
 */
137 mpc74xx_l2_enable(register_t l2cr_config)
142 vers = mfpvr() >> 16;
153 ccr = mfspr(SPR_L2CR);
157 /* Configure L2 cache. */
/* Write the configuration with the enable bit (L2E) held clear ... */
158 ccr = l2cr_config & ~L2CR_L2E;
/* ... while kicking off a global invalidate (L2I). */
159 mtspr(SPR_L2CR, ccr | L2CR_L2I);
161 ccr = mfspr(SPR_L2CR);
/* Finally commit the caller's full configuration, enable bit included. */
164 mtspr(SPR_L2CR, l2cr_config);
167 return (l2cr_config);
/*
 * Program and enable the MPC745x L3 cache using the multi-step L3CR
 * sequence (config with enables clear, clock pulse, invalidate, enable).
 * NOTE(review): elided listing — declarations, delay loops, the final
 * L3E enable, and the return are not visible between the numbered lines.
 */
171 mpc745x_l3_enable(register_t l3cr_config)
175 ccr = mfspr(SPR_L3CR);
179 /* Configure L3 cache. */
/* Start with enable, invalidate, parity, and clock-enable bits clear. */
180 ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
181 mtspr(SPR_L3CR, ccr);
182 ccr |= 0x4000000; /* Magic, but documented. */
183 mtspr(SPR_L3CR, ccr);
185 mtspr(SPR_L3CR, ccr);
/* Global invalidate: set L3I and spin until hardware clears it. */
186 mtspr(SPR_L3CR, ccr | L3CR_L3I);
187 while (mfspr(SPR_L3CR) & L3CR_L3I)
/* Toggle the clock-enable bit as part of the documented sequence. */
189 mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
192 mtspr(SPR_L3CR, ccr);
196 mtspr(SPR_L3CR, ccr);
/*
 * Enable the MPC74xx L1 data cache via HID0.  NOTE(review): elided —
 * the declaration of hid and the write that actually sets the D-cache
 * enable bit after the invalidate are not visible in this fragment.
 */
203 mpc74xx_l1d_enable(void)
207 hid = mfspr(SPR_HID0);
211 /* Enable L1 D-cache */
/* DCFI flash-invalidates the D-cache before it is switched on. */
214 mtspr(SPR_HID0, hid | HID0_DCFI);
/*
 * Enable the MPC74xx L1 instruction cache via HID0; parallels
 * mpc74xx_l1d_enable().  NOTE(review): elided — the declaration of hid
 * and the final I-cache enable write are not visible in this fragment.
 */
221 mpc74xx_l1i_enable(void)
225 hid = mfspr(SPR_HID0);
229 /* Enable L1 I-cache */
/* ICFI flash-invalidates the I-cache before it is switched on. */
232 mtspr(SPR_HID0, hid | HID0_ICFI);
/*
 * SYSINIT hook: snapshot the boot CPU's configuration registers into
 * bsp_state[] so the AP bootstrap (and sleep/resume) paths can restore
 * matching state.  NOTE(review): elided listing — the switch cases that
 * select the per-CPU-family paths are not visible between the numbered
 * lines.
 */
239 cpudep_save_config(void *dummy)
243 vers = mfpvr() >> 16;
/* Straightforward path: HID0/1/4/5 each fit one register_t slot. */
250 bsp_state[0] = mfspr(SPR_HID0);
251 bsp_state[1] = mfspr(SPR_HID1);
252 bsp_state[2] = mfspr(SPR_HID4);
253 bsp_state[3] = mfspr(SPR_HID5);
/*
 * 64-bit HIDs captured into 32-bit slots: each mfspr result is split,
 * high word (srdi 32) into the even slot and low word (mr copy) into
 * the odd slot, so each HID spans a pair of bsp_state entries.
 */
255 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
256 : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
257 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
258 : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
259 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
260 : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
261 __asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
262 : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
/* Hypervisor-mode save path, compiled out (see comment on the #ifdef). */
269 #ifdef NOTYET /* Causes problems if in instruction stream on 970 */
270 if (mfmsr() & PSL_HV) {
271 bsp_state[0] = mfspr(SPR_HID0);
272 bsp_state[1] = mfspr(SPR_HID1);
273 bsp_state[2] = mfspr(SPR_HID4);
274 bsp_state[3] = mfspr(SPR_HID6);
/* Cell-specific thread switch control/status registers. */
276 bsp_state[4] = mfspr(SPR_CELL_TSCR);
280 bsp_state[5] = mfspr(SPR_CELL_TSRL);
286 /* Only MPC745x CPUs have an L3 cache. */
287 bsp_state[3] = mfspr(SPR_L3CR);
/*
 * NOTE(review): appears to fall through from the 745x case so the
 * whole 74xx family also saves L2CR/HID1/HID0 — confirm against the
 * unelided source.
 */
294 bsp_state[2] = mfspr(SPR_L2CR);
295 bsp_state[1] = mfspr(SPR_HID1);
296 bsp_state[0] = mfspr(SPR_HID0);
/*
 * NOTE(review): tail of the AP setup routine (cpudep_ap_setup() in the
 * full source).  The function signature, local declarations, and many
 * switch case labels are elided from this listing — only the restore
 * sequence bodies are visible.
 */
307 vers = mfpvr() >> 16;
309 /* The following is needed for restoring from sleep. */
310 platform_smp_timebase_sync(0, 1);
/*
 * Zero SPR 311 — presumably HIOR on the 970, placing the exception
 * vectors at physical 0; TODO confirm against the 970 manual.
 */
317 __asm __volatile("mtspr 311,%0" :: "r"(0));
321 * The 970 has strange rules about how to update HID registers.
322 * See Table 2-3, 970MP manual
324 * Note: HID4 and HID5 restored already in
325 * cpudep_ap_early_bootstrap()
/* Clear the ASR before the HID0/HID1 update sequences below. */
328 __asm __volatile("mtasr %0; sync" :: "r"(0));
/*
 * 32-bit path: HID0 write followed by the repeated mfspr reads the
 * 970 documentation requires; HID1 is written twice back to back.
 */
333 mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \
334 mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \
336 :: "r"(bsp_state[0]), "K"(SPR_HID0));
337 __asm __volatile("sync; isync; \
338 mtspr %1, %0; mtspr %1, %0; sync; isync"
339 :: "r"(bsp_state[1]), "K"(SPR_HID1));
/*
 * 64-bit path: same update discipline, but the saved doublewords are
 * loaded with "ld" directly from bsp_state (offsets 0/8 = slots 0/1).
 */
345 mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \
346 mfspr %0, %1; mfspr %0, %1; mfspr %0, %1; \
348 : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
349 __asm __volatile("ld %0, 8(%2); sync; isync; \
350 mtspr %1, %0; mtspr %1, %0; sync; isync"
351 : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
/* Hypervisor-mode restore path, compiled out (see save side above). */
357 #ifdef NOTYET /* Causes problems if in instruction stream on 970 */
358 if (mfmsr() & PSL_HV) {
359 mtspr(SPR_HID0, bsp_state[0]);
360 mtspr(SPR_HID1, bsp_state[1]);
361 mtspr(SPR_HID4, bsp_state[2]);
362 mtspr(SPR_HID6, bsp_state[3]);
364 mtspr(SPR_CELL_TSCR, bsp_state[4]);
368 mtspr(SPR_CELL_TSRL, bsp_state[5]);
378 /* XXX: Program the CPU ID into PIR */
379 __asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));
/* Restore HID0/HID1, then bring caches up in L3 -> L2 -> L1 order. */
384 mtspr(SPR_HID0, bsp_state[0]); isync();
385 mtspr(SPR_HID1, bsp_state[1]); isync();
387 /* Now enable the L3 cache. */
392 /* Only MPC745x CPUs have an L3 cache. */
393 reg = mpc745x_l3_enable(bsp_state[3]);
398 reg = mpc74xx_l2_enable(bsp_state[2]);
399 reg = mpc74xx_l1d_enable();
400 reg = mpc74xx_l1i_enable();
409 if (mfmsr() & PSL_HV) {
410 mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr |
418 if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
/* NOTE(review): "performace" typo in the runtime message below. */
421 printf("WARNING: Unknown CPU type. Cache performace may be "