/*-
 * Copyright (c) 2005 Olivier Houchard.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Since we are compiled outside of the normal kernel build process, we
 * need to include opt_global.h manually.
 */
#include "opt_global.h"
#include "opt_kernname.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <machine/asm.h>
#include <sys/param.h>
#include <sys/elf32.h>
#include <sys/inflate.h>
#include <machine/elf.h>
#include <machine/pte.h>
#include <machine/cpufunc.h>
#include <machine/armreg.h>

extern char kernel_start[];
extern char kernel_end[];

extern void *_end;

void _start(void);
void __start(void);
void _startC(void);

extern unsigned int cpufunc_id(void);
extern void armv6_idcache_wbinv_all(void);
extern void armv7_idcache_wbinv_all(void);
extern void do_call(void *, void *, void *, int);

#define GZ_HEAD	0xa
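
/*
 * Pick the CPU-specific cache write-back-and-invalidate routine at
 * compile time; the kernel config is expected to define the matching
 * CPU_* option below.
 */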
#if defined(CPU_ARM9)
#define cpu_idcache_wbinv_all	arm9_idcache_wbinv_all
extern void arm9_idcache_wbinv_all(void);
#elif defined(CPU_FA526)
#define cpu_idcache_wbinv_all	fa526_idcache_wbinv_all
extern void fa526_idcache_wbinv_all(void);
#elif defined(CPU_ARM9E)
#define cpu_idcache_wbinv_all	armv5_ec_idcache_wbinv_all
extern void armv5_ec_idcache_wbinv_all(void);
#elif defined(CPU_ARM1176)
#define cpu_idcache_wbinv_all	armv6_idcache_wbinv_all
#elif defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_XSCALE_80219)
#define cpu_idcache_wbinv_all	xscale_cache_purgeID
extern void xscale_cache_purgeID(void);
#elif defined(CPU_XSCALE_81342)
#define cpu_idcache_wbinv_all	xscalec3_cache_purgeID
extern void xscalec3_cache_purgeID(void);
#elif defined(CPU_MV_PJ4B)
#if !defined(SOC_MV_ARMADAXP)
#define cpu_idcache_wbinv_all	armv6_idcache_wbinv_all
extern void armv6_idcache_wbinv_all(void);
#else
#define cpu_idcache_wbinv_all()	armadaxp_idcache_wbinv_all()
#endif
#endif /* CPU_MV_PJ4B */
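
/* Likewise for the L2 cache, where the platform has one. */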
#ifdef CPU_XSCALE_81342
#define cpu_l2cache_wbinv_all	xscalec3_l2cache_purge
extern void xscalec3_l2cache_purge(void);
#elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY)
#define cpu_l2cache_wbinv_all	sheeva_l2cache_wbinv_all
extern void sheeva_l2cache_wbinv_all(void);
#elif defined(CPU_CORTEXA) || defined(CPU_KRAIT)
#define cpu_idcache_wbinv_all	armv7_idcache_wbinv_all
#define cpu_l2cache_wbinv_all()
#else
#define cpu_l2cache_wbinv_all()
#endif

static void armadaxp_idcache_wbinv_all(void);

int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size = 32;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

int block_userspace_access = 0;
extern int arm9_dcache_sets_inc;
extern int arm9_dcache_sets_max;
extern int arm9_dcache_index_max;
extern int arm9_dcache_index_inc;
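
/*
 * The trampoline is linked stand-alone, without libc or the kernel
 * proper, so it carries its own minimal memcpy() and bzero().
 */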
static __inline void *
memcpy(void *dst, const void *src, int len)
{
	const char *s = src;
	char *d = dst;

	while (len) {
		/*
		 * The word-at-a-time path is deliberately disabled with
		 * "0 &&"; everything is copied bytewise.
		 */
		if (0 && len >= 4 && !((vm_offset_t)d & 3) &&
		    !((vm_offset_t)s & 3)) {
			*(uint32_t *)d = *(uint32_t *)s;
			s += 4;
			d += 4;
			len -= 4;
		} else {
			*d++ = *s++;
			len--;
		}
	}
	return (dst);
}

static __inline void
bzero(void *addr, int count)
{
	char *tmp = (char *)addr;

	while (count > 0) {
		if (count >= 4 && !((vm_offset_t)tmp & 3)) {
			*(uint32_t *)tmp = 0;
			tmp += 4;
			count -= 4;
		} else {
			*tmp = 0;
			tmp++;
			count--;
		}
	}
}

static void arm9_setup(void);
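
/*
 * First C code to run: carve out a temporary stack above _end, put the
 * CPU into a known state (MMU off, caches and write buffer on) and hand
 * over to __start().
 */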
void
_startC(void)
{
	int tmp1;
	unsigned int sp = ((unsigned int)&_end & ~3) + 4;
	unsigned int pc, kernphysaddr;

	/*
	 * Figure out the physical address the kernel was loaded at.  This
	 * assumes the entry point (this code right here) is in the first page,
	 * which will always be the case for this trampoline code.
	 */
	__asm __volatile("mov %0, pc\n"
	    : "=r" (pc));
	kernphysaddr = pc & ~PAGE_MASK;

#if defined(FLASHADDR) && defined(PHYSADDR) && defined(LOADERRAMADDR)
	if ((FLASHADDR > LOADERRAMADDR && pc >= FLASHADDR) ||
	    (FLASHADDR < LOADERRAMADDR && pc < LOADERRAMADDR)) {
		/*
		 * We're running from flash, so just copy the whole thing
		 * from flash to memory.
		 * This is far from optimal; we could do the relocation or
		 * the unzipping directly from flash to memory to avoid this
		 * needless copy, but it would require knowing the flash
		 * physical address.
		 */
		unsigned int target_addr;
		unsigned int tmp_sp;
		uint32_t src_addr = (uint32_t)&_start - PHYSADDR + FLASHADDR
		    + (pc - FLASHADDR - ((uint32_t)&_startC - PHYSADDR)) & 0xfffff000;

		target_addr = (unsigned int)&_start - PHYSADDR + LOADERRAMADDR;
		tmp_sp = target_addr + 0x100000 +
		    (unsigned int)&_end - (unsigned int)&_start;
		memcpy((char *)target_addr, (char *)src_addr,
		    (unsigned int)&_end - (unsigned int)&_start);
		/* Temporarily set the sp and jump to the new location. */
		__asm __volatile(
		    "mov sp, %1\n"
		    "mov pc, %0\n"
		    : : "r" (target_addr), "r" (tmp_sp));
	}
#endif
#ifdef KZIP
	sp += KERNSIZE + 0x100;
	sp &= ~(L1_TABLE_SIZE - 1);
	sp += 2 * L1_TABLE_SIZE;
#endif
	sp += 1024 * 1024;	/* Should be enough for a stack */
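
	/*
	 * Jump to the physical alias of the code below, then set up the
	 * control register: MMU off, data cache, write buffer, instruction
	 * cache and branch prediction on, and finally install the temporary
	 * stack pointer computed above.
	 */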
229 __asm __volatile("adr %0, 2f\n"
230 "bic %0, %0, #0xff000000\n"
231 "and %1, %1, #0xff000000\n"
233 "mrc p15, 0, %1, c1, c0, 0\n"
234 "bic %1, %1, #1\n" /* Disable MMU */
235 "orr %1, %1, #(4 | 8)\n" /* Add DC enable,
237 "orr %1, %1, #0x1000\n" /* Add IC enable */
238 "orr %1, %1, #(0x800)\n" /* BPRD enable */
240 "mcr p15, 0, %1, c1, c0, 0\n"
247 : "=r" (tmp1), "+r" (kernphysaddr), "+r" (sp));

#ifndef KZIP
#ifdef CPU_ARM9
	/* So that idcache_wbinv works; */
	if ((cpufunc_id() & 0x0000f000) == 0x00009000)
		arm9_setup();
#endif
#endif
	__start();
}
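
/*
 * Probe the CP15 cache type registers (and, on ARMv7, walk CLIDR/CCSIDR)
 * to fill in the arm_*cache* globals that the cache routines rely on.
 */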
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
	    : "=r" (ctype));

	cpuid = cpufunc_id();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    (arm_dcache_min_line_size > arm_icache_min_line_size ?
		    arm_icache_min_line_size : arm_dcache_min_line_size);

		/* Read CLIDR and walk the implemented cache levels. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level) + 1;
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select this level (CSSELR), read CCSIDR. */
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the
		 * ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}

static void
arm9_setup(void)
{

	get_cachetype_cp15();
	arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
	arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
	    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
	arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
	arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
}

/*
 * Tell ARMv6- and ARMv7-style cores apart by the ThumbEE bit in ID_PFR0
 * and invoke the matching flush routine.
 */
static void
armadaxp_idcache_wbinv_all(void)
{
	uint32_t feat;

	__asm __volatile("mrc p15, 0, %0, c0, c1, 0" : "=r" (feat));
	if (feat & ARM_PFR0_THUMBEE_MASK)
		armv7_idcache_wbinv_all();
	else
		armv6_idcache_wbinv_all();
}

#ifdef KZIP
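
/*
 * Glue for the kernel's inflate(): cursors over the compressed input and
 * the output buffer, plus a trivial allocator carved out of a static
 * buffer for kzipmalloc()/kzipfree().
 */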
static unsigned char *orig_input, *i_input, *i_output;

static u_int memcnt;		/* Memory allocated: blocks */
static size_t memtot;		/* Memory allocated: bytes */

/*
 * Library functions required by inflate().
 */

#define MEMSIZ 0x8000

/*
 * Allocate memory block.
 */
unsigned char *
kzipmalloc(int size)
{
	void *ptr;
	static u_char mem[MEMSIZ];

	if (memtot + size > MEMSIZ)
		return NULL;
	ptr = mem + memtot;
	memtot += size;
	memcnt++;
	return ptr;
}

/*
 * Free allocated memory block.
 */
void
kzipfree(void *ptr)
{
	memcnt--;
	if (!memcnt)
		memtot = 0;
}

static int
input(void *dummy)
{
	if ((size_t)(i_input - orig_input) >= KERNCOMPSIZE) {
		return (GZ_EOF);
	}
	return *i_input++;
}

static int
output(void *dummy, unsigned char *ptr, unsigned long len)
{

	memcpy(i_output, ptr, len);
	i_output += len;
	return (0);
}

static void *
inflate_kernel(void *kernel, void *startaddr)
{
	struct inflate infl;
	unsigned char slide[GZ_WSIZE];

	orig_input = kernel;
	memcnt = memtot = 0;
	i_input = (unsigned char *)kernel + GZ_HEAD;
	if (((char *)kernel)[3] & 0x18) {
		/* Skip the gzip NAME/COMMENT fields, if present. */
		while (*i_input)
			i_input++;
		i_input++;
	}
	i_output = startaddr;
	bzero(&infl, sizeof(infl));
	infl.gz_input = input;
	infl.gz_output = output;
	infl.gz_slide = slide;
	inflate(&infl);
	return ((char *)(((vm_offset_t)i_output & ~3) + 4));
}

#endif
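
/*
 * Load the ELF image at kstart to its run address relative to curaddr.
 * With d == 0 this is a dry run that only returns the first free address
 * past the loaded kernel and its symbol tables; with d != 0 the PT_LOAD
 * segments are copied for real, the symbol tables are appended and
 * control jumps to the kernel entry point, never to return.
 */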
void *
load_kernel(unsigned int kstart, unsigned int curaddr, unsigned int func_end,
    int d)
{
	Elf32_Ehdr *eh;
	Elf32_Phdr phdr[64] /* XXX */, *php;
	Elf32_Shdr shdr[64] /* XXX */;
	int i, j;
	void *entry_point;
	int symtabindex = -1;
	int symstrindex = -1;
	vm_offset_t lastaddr = 0;
	Elf_Addr ssym = 0;

	eh = (Elf32_Ehdr *)kstart;
	entry_point = (void *)eh->e_entry;
	memcpy(phdr, (void *)(kstart + eh->e_phoff),
	    eh->e_phnum * sizeof(phdr[0]));

	/* Determine lastaddr. */
	for (i = 0; i < eh->e_phnum; i++) {
		if (lastaddr < (phdr[i].p_vaddr - KERNVIRTADDR + curaddr
		    + phdr[i].p_memsz))
			lastaddr = phdr[i].p_vaddr - KERNVIRTADDR +
			    curaddr + phdr[i].p_memsz;
	}

	/* Save the symbol tables, as they're about to be scratched. */
	memcpy(shdr, (void *)(kstart + eh->e_shoff),
	    sizeof(*shdr) * eh->e_shnum);
	if (eh->e_shnum * eh->e_shentsize != 0 &&
	    eh->e_shoff != 0) {
		for (i = 0; i < eh->e_shnum; i++) {
			if (shdr[i].sh_type == SHT_SYMTAB) {
				/*
				 * A symbol table that lives inside a
				 * PT_LOAD segment will be clobbered when
				 * the segments are copied, so forget it.
				 */
				for (j = 0; j < eh->e_phnum; j++) {
					if (phdr[j].p_type == PT_LOAD &&
					    shdr[i].sh_offset >=
					    phdr[j].p_offset &&
					    (shdr[i].sh_offset +
					     shdr[i].sh_size <=
					     phdr[j].p_offset +
					     phdr[j].p_filesz)) {
						shdr[i].sh_offset = 0;
						shdr[i].sh_size = 0;
						j = eh->e_phnum;
					}
				}
				if (shdr[i].sh_offset != 0 &&
				    shdr[i].sh_size != 0) {
					symtabindex = i;
					symstrindex = shdr[i].sh_link;
				}
			}
		}
		func_end = roundup(func_end, sizeof(long));
		if (symtabindex >= 0 && symstrindex >= 0) {
			ssym = lastaddr;
			if (d) {
				memcpy((void *)func_end, (void *)(
				    shdr[symtabindex].sh_offset + kstart),
				    shdr[symtabindex].sh_size);
				memcpy((void *)(func_end +
				    shdr[symtabindex].sh_size),
				    (void *)(shdr[symstrindex].sh_offset +
				    kstart), shdr[symstrindex].sh_size);
			} else {
				lastaddr += shdr[symtabindex].sh_size;
				lastaddr = roundup(lastaddr,
				    sizeof(shdr[symtabindex].sh_size));
				lastaddr += sizeof(shdr[symstrindex].sh_size);
				lastaddr += shdr[symstrindex].sh_size;
				lastaddr = roundup(lastaddr,
				    sizeof(shdr[symstrindex].sh_size));
			}
		}
	}
	if (!d)
		return ((void *)lastaddr);

	j = eh->e_phnum;
	for (i = 0; i < j; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;
		memcpy((void *)(phdr[i].p_vaddr - KERNVIRTADDR + curaddr),
		    (void *)(kstart + phdr[i].p_offset), phdr[i].p_filesz);
		/* Clean space from oversized segments, e.g. bss. */
		if (phdr[i].p_filesz < phdr[i].p_memsz)
			bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR +
			    curaddr + phdr[i].p_filesz), phdr[i].p_memsz -
			    phdr[i].p_filesz);
	}
	/* Now grab the symbol tables. */
	if (symtabindex >= 0 && symstrindex >= 0) {
		*(Elf_Size *)lastaddr =
		    shdr[symtabindex].sh_size;
		lastaddr += sizeof(shdr[symtabindex].sh_size);
		memcpy((void *)lastaddr,
		    (void *)func_end,
		    shdr[symtabindex].sh_size);
		lastaddr += shdr[symtabindex].sh_size;
		lastaddr = roundup(lastaddr,
		    sizeof(shdr[symtabindex].sh_size));
		*(Elf_Size *)lastaddr =
		    shdr[symstrindex].sh_size;
		lastaddr += sizeof(shdr[symstrindex].sh_size);
		memcpy((void *)lastaddr,
		    (void *)(func_end +
		    shdr[symtabindex].sh_size),
		    shdr[symstrindex].sh_size);
		lastaddr += shdr[symstrindex].sh_size;
		lastaddr = roundup(lastaddr,
		    sizeof(shdr[symstrindex].sh_size));
		*(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER;
		*((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR;
		*((Elf_Addr *)curaddr + 2) = lastaddr - curaddr + KERNVIRTADDR;
	} else
		*(Elf_Addr *)curaddr = 0;
	/* Invalidate the instruction cache. */
	__asm __volatile("mcr p15, 0, %0, c7, c5, 0\n"
	    "mcr p15, 0, %0, c7, c10, 4\n"
	    : : "r" (curaddr));
	__asm __volatile("mrc p15, 0, %0, c1, c0, 0\n"
	    "bic %0, %0, #1\n"	/* MMU_DISABLE */
	    "mcr p15, 0, %0, c1, c0, 0\n"
	    : "=r" (ssym));
	/* Jump to the entry point. */
	((void (*)(void))(entry_point - KERNVIRTADDR + curaddr))();
	__asm __volatile(".globl func_end\n"	/* Marks the end of load_kernel() */
	    "func_end:");

	/* NOTREACHED */
	return NULL;
}

extern char func_end[];

#define PMAP_DOMAIN_KERNEL	0 /*
				   * Just define it instead of including the
				   * whole VM headers set.
				   */
int __hack;
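
/*
 * Build a minimal L1 table at pt_addr that maps [physstart, physend)
 * with 1MB sections, virtual == physical, and turn the MMU on.
 */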
static __inline void
setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend,
    int write_back)
{
	unsigned int *pd = (unsigned int *)pt_addr;
	vm_paddr_t addr;
	int domain = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT;
	int tmp;

	bzero(pd, L1_TABLE_SIZE);
	for (addr = physstart; addr < physend; addr += L1_S_SIZE) {
		pd[addr >> L1_S_SHIFT] = L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)|
		    L1_S_DOM(PMAP_DOMAIN_KERNEL) | addr;
		if (write_back && 0)
			pd[addr >> L1_S_SHIFT] |= L1_S_B;
	}
	/* XXX: See below. */
	if (0xfff00000 < physstart || 0xfff00000 > physend)
		pd[0xfff00000 >> L1_S_SHIFT] = L1_TYPE_S|L1_S_AP(AP_KRW)|
		    L1_S_DOM(PMAP_DOMAIN_KERNEL)|physstart;
	__asm __volatile("mcr p15, 0, %1, c2, c0, 0\n"	/* Set TTB */
	    "mcr p15, 0, %1, c8, c7, 0\n"	/* Flush TLB */
	    "mcr p15, 0, %2, c3, c0, 0\n"	/* Set DACR */
	    "mrc p15, 0, %0, c1, c0, 0\n"
	    "orr %0, %0, #1\n"			/* MMU_ENABLE */
	    "mcr p15, 0, %0, c1, c0, 0\n"
	    "mrc p15, 0, %0, c2, c0, 0\n"	/* CPWAIT */
	    "mov r0, r0\n"
	    "sub pc, pc, #4\n" :
	    "=r" (tmp) : "r" (pd), "r" (domain));

	/*
	 * XXX: This is the most stupid workaround I've ever written.
	 * For some reason, the KB9202 won't boot the kernel unless
	 * we access an address which is not in the
	 * 0x20000000 - 0x20ffffff range. I hope I'll understand
	 * what's going on later.
	 */
	__hack = *(volatile int *)0xfffff21c;
}
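
/*
 * Main trampoline logic: figure out where we are running from, optionally
 * gunzip the kernel, then copy load_kernel() above the kernel image and
 * call it there, so the copy loop cannot overwrite itself.
 */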
void
__start(void)
{
	void *curaddr;
	void *dst, *altdst;
	char *kernel = (char *)&kernel_start;
	int sp;
	int pt_addr;

	__asm __volatile("mov %0, pc" :
	    "=r" (curaddr));
	curaddr = (void *)((unsigned int)curaddr & 0xfff00000);
#ifdef KZIP
	if (*kernel == 0x1f && kernel[1] == 0x8b) {	/* gzip magic */
		pt_addr = (((int)&_end + KERNSIZE + 0x100) &
		    ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;

#ifdef CPU_ARM9
		/* So that idcache_wbinv works; */
		if ((cpufunc_id() & 0x0000f000) == 0x00009000)
			arm9_setup();
#endif
		setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
		    (vm_paddr_t)curaddr + 0x10000000, 1);
		/* Gzipped kernel: inflate it, then do a dry-run load. */
		dst = inflate_kernel(kernel, &_end);
		kernel = (char *)&_end;
		altdst = 4 + load_kernel((unsigned int)kernel,
		    (unsigned int)curaddr,
		    (unsigned int)&func_end + 800, 0);
		if (altdst > dst)
			dst = altdst;

		/*
		 * Disable the MMU.  Otherwise, the setup_pagetables call
		 * below might overwrite the L1 table we are currently using.
		 */
		cpu_idcache_wbinv_all();
		cpu_l2cache_wbinv_all();
		__asm __volatile("mrc p15, 0, %0, c1, c0, 0\n"
		    "bic %0, %0, #1\n"	/* MMU_DISABLE */
		    "mcr p15, 0, %0, c1, c0, 0\n"
		    : "=r" (pt_addr));
	} else
#endif
		dst = 4 + load_kernel((unsigned int)&kernel_start,
		    (unsigned int)curaddr,
		    (unsigned int)&func_end, 0);
	dst = (void *)(((vm_offset_t)dst & ~3));
	pt_addr = ((unsigned int)dst & ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
	setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
	    (vm_paddr_t)curaddr + 0x10000000, 0);
	sp = pt_addr + L1_TABLE_SIZE + 8192;
	sp = sp & ~3;
	dst = (void *)(sp + 4);
	memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end -
	    (unsigned int)&load_kernel + 800);
	do_call(dst, kernel, dst + (unsigned int)(&func_end) -
	    (unsigned int)(&load_kernel) + 800, sp);
}

/* We need to provide these functions but never call them. */
void __aeabi_unwind_cpp_pr0(void);
void __aeabi_unwind_cpp_pr1(void);
void __aeabi_unwind_cpp_pr2(void);

__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr1);
__strong_reference(__aeabi_unwind_cpp_pr0, __aeabi_unwind_cpp_pr2);

void
__aeabi_unwind_cpp_pr0(void)
{
}