 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <dev/ofw/openfirm.h>
#include "mmu_oea64.h"

#ifndef __powerpc64__
struct bat battable[16];

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void *testppc64, *testppc64size;
extern void *restorebridge, *restorebridgesize;
extern void *rfid_patch, *rfi_patch1, *rfi_patch2;
extern void *trapcode64;

extern Elf_Addr _GLOBAL_OFFSET_TABLE_[];

extern void *rstcode, *rstcodeend;
extern void *trapcode, *trapcodeend;
extern void *hypertrapcode, *hypertrapcodeend;
extern void *generictrap, *generictrap64;
extern void *alitrap, *aliend;
extern void *dsitrap, *dsiend;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbend;
extern void *imisstrap, *imisssize;
extern void *dlmisstrap, *dlmisssize;
extern void *dsmisstrap, *dsmisssize;

extern void *ap_pcpu;

extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *,
    uint32_t, register_t offset, register_t msr);

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);

aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
         * If running from an FDT, make sure we are in real mode to avoid
         * tromping on firmware page tables. Everything in the kernel assumes
         * 1:1 mappings out of firmware, so this won't break anything not
         * already broken. This doesn't work if there is live OF, since OF
         * may internally use non-1:1 mappings.
        mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));

         * If in real mode, relocate to high memory so that the kernel
         * can execute from the direct map.
        if (!(mfmsr() & PSL_DR) &&
            (vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS)
                __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
                    DMAP_BASE_ADDRESS, mfmsr());

        /* Various very early CPU fix ups */
        switch (mfpvr() >> 16) {
                 * PowerPC 970 CPUs have a misfeature requested by Apple that
                 * makes them pretend they have a 32-byte cacheline. Turn this
                 * off before we measure the cacheline size.
                scratch = mfspr(SPR_HID5);
                scratch &= ~HID5_970_DCBZ_SIZE_HI;
                mtspr(SPR_HID5, scratch);
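                /*
                 * With HID5[DCBZ_SIZE_HI] cleared, dcbz on the 970 should once
                 * again act on the full 128-byte line, so the dcbz-based probe
                 * in aim_cpu_init() below will see the real cache line size.
                 */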
                /* XXX: get from ibm,slb-size in device tree */

aim_cpu_init(vm_offset_t toc)
        size_t trap_offset, trapsize;
        uint8_t *cache_check;

#ifndef __powerpc64__
        /* General setup for AIM CPUs */
        psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;

        psl_kernset |= PSL_SF;
        if (mfmsr() & PSL_HV)
                psl_kernset |= PSL_HV;

        psl_userset = psl_kernset | PSL_PR;
        psl_userset32 = psl_userset & ~PSL_SF;

        /* Bits that users aren't allowed to change */
        psl_userstatic = ~(PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
         * Mask bits from the SRR1 that aren't really the MSR:
         * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
        psl_userstatic &= ~0x783f0000UL;
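        /*
         * A quick check of that constant (using the PowerPC MSB-0 numbering of
         * a 32-bit word): bits 1-4 are 0x78000000 and bits 10-15 are
         * 0x003f0000, which together form the 0x783f0000 mask cleared above.
         */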
         * Initialize the interrupt tables and figure out our cache line
         * size and whether or not we need the 64-bit bridge code.

         * Disable translation in case the vector area hasn't been
         * mapped (G5). Note that no OFW calls can be made until
         * translation is re-enabled.
        mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

         * Measure the cacheline size using dcbz
         * Use EXC_PGM as a playground. We are about to overwrite it
         * anyway, we know it exists, and we know it is cache-aligned.
        cache_check = (void *)EXC_PGM;

        for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
                cache_check[cacheline_size] = 0xff;

        __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

        /* Find the first byte dcbz did not zero to get the cache line size */
        for (cacheline_size = 0; cacheline_size < 0x100 &&
            cache_check[cacheline_size] == 0; cacheline_size++);
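        /*
         * On the CPUs this path supports, the loop above is expected to stop
         * at 32 (G2/G3/G4-class parts) or 128 (970/POWER-class parts); other
         * values would suggest the probe itself misfired.
         */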
        /* Work around psim bug */
        if (cacheline_size == 0) {

#ifndef __powerpc64__
         * Figure out whether we need to use the 64 bit PMAP. This works by
         * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
         * and setting ppc64 = 0 if that causes a trap.
        bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size);
        __syncicache((void *)EXC_PGM, (size_t)&testppc64size);
            : "=r"(scratch), "=r"(ppc64));
                cpu_features |= PPC_FEATURE_64;

         * Now copy restorebridge into all the handlers, if necessary,
         * and set up the trap tables.
        if (cpu_features & PPC_FEATURE_64) {
                /* Patch the two instances of rfi -> rfid */
                bcopy(&rfid_patch, &rfi_patch1, 4);
                /* rfi_patch2 is at the end of dbleave */
                bcopy(&rfid_patch, &rfi_patch2, 4);
#else /* powerpc64 */
        cpu_features |= PPC_FEATURE_64;

        trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

         * Copy generic handler into every possible trap. Special cases will get
         * different ones in a minute.
        for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
                bcopy(&trapcode, (void *)trap, trapsize);

#ifndef __powerpc64__
        if (cpu_features & PPC_FEATURE_64) {
                 * Copy a code snippet to restore 32-bit bridge mode
                 * to the top of every non-generic trap handler
                trap_offset += (size_t)&restorebridgesize;
                bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
                bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
                bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
                bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
                bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
                bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);

        trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
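        /*
         * The exceptions below are hypervisor-class: the CPU delivers them
         * through HSRR0/HSRR1 rather than SRR0/SRR1, so they get the separate
         * hypertrapcode entry stub instead of the generic trap prologue.
         */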
        bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
        bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
        bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
            (size_t)&rstcode);
        bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
            (size_t)&dblow);
        bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend -
            (size_t)&alitrap);
        bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend -
            (size_t)&dsitrap);
        /* Set TOC base so that the interrupt code can get at it */
        *((void **)TRAP_GENTRAP) = &generictrap;
        *((register_t *)TRAP_TOCBASE) = toc;

        /* Set branch address for trap code */
        if (cpu_features & PPC_FEATURE_64)
                *((void **)TRAP_GENTRAP) = &generictrap64;
        else
                *((void **)TRAP_GENTRAP) = &generictrap;
        *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;
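        /*
         * A 32-bit kernel has no TOC, so the GOT address is stored in the
         * same fixed slot; either way the low-memory trap code can pick up
         * a usable base pointer from TRAP_TOCBASE.
         */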
        /* G2-specific TLB miss helper handlers */
        bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize);
        bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize);
        bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize);

        __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);
        /* Warn if cacheline size was not determined */
        if (cacheline_warn == 1) {
                printf("WARNING: cacheline size undetermined, setting to 32\n");
         * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
         * in case the platform module had a better idea of what we
         * should use.
        if (cpu_features & PPC_FEATURE_64)
                pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
        else
                pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
 * Shut down the CPU as much as possible.
ptrace_single_step(struct thread *td)
        struct trapframe *tf;

ptrace_clear_single_step(struct thread *td)
        struct trapframe *tf;

kdb_cpu_clear_singlestep(void)
        kdb_frame->srr1 &= ~PSL_SE;

kdb_cpu_set_singlestep(void)
        kdb_frame->srr1 |= PSL_SE;

 * Initialise a struct pcpu.
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
        /* Copy the SLB contents from the current CPU */
        memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));

#ifndef __powerpc64__
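/*
 * On 32-bit OEA the top four bits of an effective address select one of the
 * sixteen segment registers; the VSID is held in the low-order bits of that
 * register, which is what this helper extracts.
 */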
va_to_vsid(pmap_t pm, vm_offset_t va)
        return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);

 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in in early boot (now)) and
 * (b) can still, in principle, work once the kernel is going. Because these
 * rely on existing mappings/real mode, unmap is a no-op.

pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

         * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
         * try to get the address in a memory region compatible with the
         * direct map for efficiency later.
        if (mfmsr() & PSL_DR)
                return (pa);
        else
                return (DMAP_BASE_ADDRESS + pa);
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
        KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
flush_disable_caches(void)
        register_t cache_reg;
        volatile uint32_t *memp;

        mtmsr(msr & ~(PSL_EE | PSL_DR));
        msscr0 = mfspr(SPR_MSSCR0);
        msscr0 &= ~MSSCR0_L2PFE;
        mtspr(SPR_MSSCR0, msscr0);

        __asm__ __volatile__("dssall; sync");

        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));
        __asm__ __volatile__("dcbf 0,%0" :: "r"(0));

        /* Lock the L1 Data cache. */
        mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);

        mtspr(SPR_LDSTCR, 0);
         * Perform this in two stages: Flush the cache starting in RAM, then do
         * it from the ROM region at 0xfff00000.
        memp = (volatile uint32_t *)0x00000000;
        for (i = 0; i < 128 * 1024; i++) {
                __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                memp += 32/sizeof(*memp);

        memp = (volatile uint32_t *)0xfff00000;
        mtspr(SPR_LDSTCR, x);
        for (i = 0; i < 128; i++) {
                __asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
                memp += 32/sizeof(*memp);
                x = ((x << 1) | 1) & 0xff;
        mtspr(SPR_LDSTCR, 0);

        cache_reg = mfspr(SPR_L2CR);
        if (cache_reg & L2CR_L2E) {
                cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
                mtspr(SPR_L2CR, cache_reg);

                mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
                while (mfspr(SPR_L2CR) & L2CR_L2HWF)
                        ; /* Busy wait for cache to flush */

                cache_reg &= ~L2CR_L2E;
                mtspr(SPR_L2CR, cache_reg);
                mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
                while (mfspr(SPR_L2CR) & L2CR_L2I)
                        ; /* Busy wait for L2 cache invalidate */

        cache_reg = mfspr(SPR_L3CR);
        if (cache_reg & L3CR_L3E) {
                cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
                mtspr(SPR_L3CR, cache_reg);

                mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
                while (mfspr(SPR_L3CR) & L3CR_L3HWF)
                        ; /* Busy wait for cache to flush */

                cache_reg &= ~L3CR_L3E;
                mtspr(SPR_L3CR, cache_reg);
                mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
                while (mfspr(SPR_L3CR) & L3CR_L3I)
                        ; /* Busy wait for L3 cache invalidate */

        mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);

        static u_quad_t timebase = 0;
        static register_t sprgs[4];
        static register_t srrs[2];

        struct thread *fputd;
        struct thread *vectd;
        register_t saved_msr;

        PCPU_SET(restore, &resetjb);

        fputd = PCPU_GET(fputhread);
        vectd = PCPU_GET(vecthread);

        if (setjmp(resetjb) == 0) {
                sprgs[0] = mfspr(SPR_SPRG0);
                sprgs[1] = mfspr(SPR_SPRG1);
                sprgs[2] = mfspr(SPR_SPRG2);
                sprgs[3] = mfspr(SPR_SPRG3);
                srrs[0] = mfspr(SPR_SRR0);
                srrs[1] = mfspr(SPR_SRR1);

                flush_disable_caches();
                hid0 = mfspr(SPR_HID0);
                hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
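                /*
                 * Setting HID0[SLEEP] here only selects the power-saving
                 * mode; the core does not actually enter sleep until
                 * MSR[POW] is set just below.
                 */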
                msr = mfmsr() | PSL_POW;
                mtspr(SPR_HID0, hid0);

        platform_smp_timebase_sync(timebase, 0);
        PCPU_SET(curthread, curthread);
        PCPU_SET(curpcb, curthread->td_pcb);
        pmap_activate(curthread);

        mtspr(SPR_SPRG0, sprgs[0]);
        mtspr(SPR_SPRG1, sprgs[1]);
        mtspr(SPR_SPRG2, sprgs[2]);
        mtspr(SPR_SPRG3, sprgs[3]);
        mtspr(SPR_SRR0, srrs[0]);
        mtspr(SPR_SRR1, srrs[1]);

        if (fputd == curthread)
                enable_fpu(curthread);
        if (vectd == curthread)
                enable_vec(curthread);