/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *			and many others.
 */
#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
/*
 * Note: this version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */
/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
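/*
 * A worked example of the recursive mapping above (illustrative values
 * only; the real constants come from <machine/pmap.h>).  With a non-PAE
 * configuration where PTDPTDI = 0x3ff, PDRSHIFT = 22, PAGE_SIZE = 4096
 * and PDESIZE = 4:
 *
 *	PTmap  = 0x3ff << 22          = 0xffc00000
 *	PTD    = PTmap + 0x3ff * 4096 = 0xfffff000
 *	PTDpde = PTD   + 0x3ff * 4    = 0xfffffffc
 *
 * so once paging is on, the PTE of any virtual address can be read
 * through the window, e.g.:
 *
 *	movl	%cr2,%eax		# a faulting address
 *	shrl	$PAGE_SHIFT,%eax	# convert to a page index
 *	movl	PTmap(,%eax,4),%eax	# fetch its PTE (4 = PTESIZE here)
 */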
/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */

bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

IdlePTD:	.long	0		/* phys addr of kernel PTD */

IdlePDPT:	.long	0		/* phys addr of kernel PDPT */

KPTmap:		.long	0		/* address of kernel page tables */

KPTphys:	.long	0		/* phys addr of kernel page tables */

proc0uarea:	.long	0		/* address of proc 0 uarea (unused) */
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0upa:		.long	0		/* phys addr of proc0 UAREA (unused) */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

	.globl	pc98_system_parameter
pc98_system_parameter:
/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
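/*
 * Illustrative note: until paging is enabled the kernel executes at its
 * load (physical) address, while all symbols are linked at KERNBASE plus
 * the physical address.  R() subtracts KERNBASE so that, for example,
 *
 *	movl	R(physfree),%esi
 *
 * reads the variable through its physical address instead of its (not
 * yet mapped) virtual one.
 */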
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ;	\
	movl	$((foo)*PAGE_SIZE), %eax ;	\
	addl	%esi, %eax ;	\
	movl	%eax, R(physfree) ;	\
	movl	%esi, %edi ;	\
	movl	$((foo)*PAGE_SIZE),%ecx ;	\
	xorl	%eax,%eax ;	\
	cld ; rep ; stosb
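/*
 * Usage sketch (illustrative): grab one zeroed page of low memory; the
 * page's physical address is left in %esi and R(physfree) is advanced
 * past it, e.g.:
 *
 *	ALLOCPAGES(1)
 *	movl	%esi,R(vm86phystk)
 */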
/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot) \
	shll	$PTESHIFT,%ebx ;	\
	addl	base,%ebx ;	\
	orl	$PG_V,%eax ;	\
	orl	prot,%eax ;	\
1:	movl	%eax,(%ebx) ;	\
	addl	$PAGE_SIZE,%eax ;	/* increment physical address */ \
	addl	$PTESIZE,%ebx ;		/* next pte */ \
	loop	1b
/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot) \
	movl	%eax, %ebx ;	\
	shrl	$PAGE_SHIFT, %ebx ;	\
	fillkpt(R(KPTphys), prot)
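/*
 * Usage sketch (illustrative): map the first 1MB of physical memory
 * read/write into the kernel page tables, much as the code below does
 * for the ISA hole:
 *
 *	xorl	%eax,%eax			# start at physical 0
 *	movl	$(0x100000 >> PAGE_SHIFT),%ecx	# 256 pages = 1MB
 *	fillkptphys($PG_RW)
 */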
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$R(pc98_system_parameter),%edi
	/* Tell the bios to warmboot next time */

	/* Set up a real frame in case the double return in newboot is executed. */

	/* Don't trust what the BIOS gives for eflags. */

	/*
	 * Don't trust what the BIOS gives for %fs and %gs.  Trust the
	 * bootstrap to set %cs, %ds, %es and %ss.
	 */
	/*
	 * Clear the bss.  Not all boot programs do it, and it is our job
	 * anyway.
	 *
	 * XXX we don't check that there is memory for our bss and page
	 * tables before using it.
	 *
	 * Note: we must be careful to not overwrite an active gdt or idt.
	 * They are inactive from now until we switch to new ones, since we
	 * don't load any more segment registers or permit interrupts until
	 * after the switch.
	 */
	call	recover_bootinfo

	/* Get onto a stack that we can trust. */
	/*
	 * XXX this step is delayed in case recover_bootinfo needs to return
	 * via the old stack, but it need not be, since recover_bootinfo
	 * actually returns via the old frame.
	 */
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220

	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax		/* convert to 128KB units */
	movb	%al,R(pc98_system_parameter)+1
	movw	R(pc98_system_parameter+0x86),%ax

	call	create_pagetables
	/*
	 * If the CPU has support for VME, turn it on.
	 */
	testl	$CPUID_VME, R(cpu_feature)
	/* Now enable paging */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

	pushl	$begin			/* jump to high virtualized address */
	ret
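/*
 * The pushl/ret pair above acts as a long jump: the CPU is still
 * fetching through the low identity mapping, and $begin is a symbol
 * linked at its high KERNBASE-relative address, so the "return" lands
 * in the kernel's native virtual address range.
 */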
begin:
	/* now running relocated at KERNBASE where the system is linked to run */

	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */

	/* bootstrap stack end location */
	leal	(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

	movl	IdlePTD,%esi
	movl	%esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)
	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */
/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	calll	*SIGF_HANDLER(%esp)	/* call signal handler */
	leal	SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	1f
	mov	UC_GS(%eax),%gs		/* restore %gs */
1:	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
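/*
 * Stack picture at sigcode entry (illustrative; the real offsets come
 * from the SIGF_ and UC_ constants generated into assym.s):
 *
 *	0(%esp)        -> signal handler address (SIGF_HANDLER)
 *	SIGF_UC(%esp)  -> saved ucontext, later passed to sigreturn(2)
 *
 * The handler returns here and the trampoline traps into the kernel
 * with int $0x80 to run sigreturn, which does not return to this code.
 */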
#ifdef COMPAT_FREEBSD4
NON_GPROF_ENTRY(freebsd4_sigcode)
	calll	*SIGF_HANDLER(%esp)	/* call signal handler */
	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC4_EFLAGS(%eax)
	jne	1f
	mov	UC4_GS(%eax),%gs	/* restore %gs */
1:	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
#endif /* COMPAT_FREEBSD4 */

#ifdef COMPAT_43
NON_GPROF_ENTRY(osigcode)
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	mov	SC_GS(%eax),%gs		/* restore %gs */
9:	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
#endif /* COMPAT_43 */
esigcode:

szsigcode:	.long	esigcode-sigcode
#ifdef COMPAT_FREEBSD4
	.globl	szfreebsd4_sigcode
szfreebsd4_sigcode: .long esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
	.globl	szosigcode
szosigcode:	.long	esigcode-osigcode
#endif
/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot
	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument: if it is 0 we
	 * are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot
	/*
	 * Seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt
	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:
	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld ; rep ; movsb
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld ; rep ; movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:
	/* Try to toggle alignment check flag; does not exist on 386. */
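/*
 * A minimal sketch of the EFLAGS-bit probe used for this (the ID bit is
 * toggled the same way at try486 below to detect CPUID support):
 *
 *	pushfl
 *	popl	%eax
 *	movl	%eax,%ecx
 *	xorl	$PSL_AC,%eax	# try to flip the alignment-check bit
 *	pushl	%eax
 *	popfl
 *	pushfl
 *	popl	%eax
 *	xorl	%ecx,%eax
 *	andl	$PSL_AC,%eax	# nonzero: the bit toggled, not a 386
 */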
	/* NexGen CPU does not have alignment check flag. */
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string ("NexG")
	movl	$0x72446e65,R(cpu_vendor+4)	# "enDr"
	movl	$0x6e657669,R(cpu_vendor+8)	# "iven"
	movl	$0,R(cpu_vendor+12)
try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	/*
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */

	jmp	3f		/* You may use Intel CPU. */
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for the Bluelighting
	 * CPU, we can't distinguish it from Cyrix's (including the IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string ("Cyri")
	movl	$0x736e4978,R(cpu_vendor+4)	# "xIns"
	movl	$0x64616574,R(cpu_vendor+8)	# "tead"
trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)
	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
	rorl	$8,%eax				# extract family type
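/*
 * Example (illustrative): with a cpu_id of 0x0651 (a Pentium II),
 * bits 11..8 hold the family, 6 here.  rorl $8 rotates the family
 * nibble into %al so the masked compares below can classify the CPU
 * as 486, Pentium, or Pentium Pro class.
 */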
	/* less than Pentium; must be 486 */

	/* Greater than Pentium... call it a Pentium Pro */
/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */
create_pagetables:

	/* Find end of kernel image (rounded up to a page boundary). */
	/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:
	/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/*   ... wrap to next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */
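/*
 * Example (illustrative): with PDRMASK = 0x3fffff (4M superpages), a
 * kernel ending at physical 0x00654321 rounds up to 0x00800000, so
 * allocation by ALLOCPAGES() below begins on a 4M boundary.
 */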
	/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)
	addl	$(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
	movl	%esi,R(KPTmap)
	/* Allocate Page Table Directory */
#ifdef PAE
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)
	/* Allocate KSTACK */
	ALLOCPAGES(KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)
	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)
	/*
	 * Enable PSE and PGE.
	 */
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even if we've
 * enabled PSE above; we'll just switch the corresponding kernel PDEs
 * before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!  DON'T BLINDLY REMOVE
 * THIS!  SMP needs the page table to be there to map the kernel P==V.
 */
	xorl	%eax, %eax
	movl	R(KERNend),%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys($PG_RW)

	/* Map page table pages. */
	movl	R(KPTphys),%eax
	movl	$NKPT,%ecx
	fillkptphys($PG_RW)
	/* Map page directory. */
#ifdef PAE
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif
	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)
	/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)
	/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)
	/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

	/* Map page 0 into the vm86 page table */
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)
	/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)
/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
#if KERNLOAD < (1 << PDRSHIFT)
	testl	$PG_G, R(pgeflag)
	jz	1f
	movl	R(IdlePTD), %eax
	movl	$PAGE_SIZE, %ecx
1:
#endif

	/*
	 * For the non-PSE case, install PDEs for PTs covering the KVA.
	 * For the PSE case, do the same, but clobber the ones corresponding
	 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
	 * PDEs immediately after.
	 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b
	/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
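/*
 * With the illustrative values from the PTmap example at the top of
 * this file (PTDPTDI = 0x3ff), this writes the page directory's own
 * physical address into PDE 0x3ff, which is exactly what makes PTD
 * appear at 0xfffff000 once paging is enabled.
 */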
#ifdef PAE
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret