1 /* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * Copyright (c) 1994-1998 Mark Brinicombe.
7 * Copyright (c) 1994 Brini.
10 * This code is derived from software written for Brini by Mark Brinicombe
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Brini.
23 * 4. The name of the company nor the name of the author may be used to
24 * endorse or promote products derived from this software without specific
25 * prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * Machine dependent functions for kernel setup
45 * This file needs a lot of work.
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
53 #include "opt_kstack_pages.h"
55 #define _ARM32_BUS_DMA_PRIVATE
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/sysproto.h>
59 #include <sys/signalvar.h>
60 #include <sys/imgact.h>
61 #include <sys/kernel.h>
63 #include <sys/linker.h>
65 #include <sys/malloc.h>
66 #include <sys/mutex.h>
69 #include <sys/ptrace.h>
76 #include <sys/msgbuf.h>
77 #include <sys/devmap.h>
78 #include <machine/physmem.h>
79 #include <machine/reg.h>
80 #include <machine/cpu.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_map.h>
87 #include <machine/vmparam.h>
88 #include <machine/pcb.h>
89 #include <machine/undefined.h>
90 #include <machine/machdep.h>
91 #include <machine/metadata.h>
92 #include <machine/armreg.h>
93 #include <machine/bus.h>
94 #include <sys/reboot.h>
96 #include <arm/xscale/ixp425/ixp425reg.h>
97 #include <arm/xscale/ixp425/ixp425var.h>
/*
 * Indices into kernel_pt_table[] for the boot-time L2 page tables,
 * plus the total table count.  All values are compile-time integer
 * constants; initarm() allocates and links one L2 table per index.
 *
 * The composite macros are fully parenthesized so they expand safely
 * inside larger expressions (e.g. multiplication or array arithmetic).
 */
#define KERNEL_PT_SYS		0	/* Page table for mapping proc0 zero page */
#define KERNEL_PT_IO		1	/* First of the device I/O page tables */
#define KERNEL_PT_IO_NUM	3	/* Number of I/O page tables */
#define KERNEL_PT_BEFOREKERN	(KERNEL_PT_IO + KERNEL_PT_IO_NUM)
#define KERNEL_PT_AFKERNEL	(KERNEL_PT_BEFOREKERN + 1)	/* L2 table for mapping after kernel */
#define KERNEL_PT_AFKERNEL_NUM	9

/* this should be evenly divisible by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */
#define NUM_KERNEL_PTS		(KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM)
/*
 * Boot-time L2 page-table descriptors (physical + virtual address pairs).
 * Allocated and linked into the L1 table by initarm() below.
 */
109 struct pv_addr kernel_pt_table[NUM_KERNEL_PTS];
111 /* Physical and virtual addresses for some global pages */
113 struct pv_addr systempage;	/* vector page, shared by all processes */
114 struct pv_addr msgbufpv;	/* kernel message buffer pages */
115 struct pv_addr irqstack;	/* IRQ-mode stack */
116 struct pv_addr undstack;	/* undefined-instruction-mode stack */
117 struct pv_addr abtstack;	/* abort-mode stack */
118 struct pv_addr kernelstack;	/* SVC-mode (proc0) kernel stack */
119 struct pv_addr minidataclean;	/* XScale mini-data cache clean area */
121 /* Static device mappings. */
/*
 * IXP425 static device mappings, consumed by devmap_bootstrap() in
 * initarm().  Each entry is { virtual base, physical base, size }.
 * NOTE(review): the terminating all-zero sentinel entry and closing
 * brace are not visible in this elided view -- confirm they follow.
 */
122 static const struct devmap_entry ixp425_devmap[] = {
123 /* Physical/Virtual address for I/O space */
124 { IXP425_IO_VBASE, IXP425_IO_HWBASE, IXP425_IO_SIZE, },
/* Expansion Bus */
127 { IXP425_EXP_VBASE, IXP425_EXP_HWBASE, IXP425_EXP_SIZE, },
129 /* CFI Flash on the Expansion Bus */
130 { IXP425_EXP_BUS_CS0_VBASE, IXP425_EXP_BUS_CS0_HWBASE,
131 IXP425_EXP_BUS_CS0_SIZE, },
133 /* IXP425 PCI Configuration */
134 { IXP425_PCI_VBASE, IXP425_PCI_HWBASE, IXP425_PCI_SIZE, },
136 /* SDRAM Controller */
137 { IXP425_MCU_VBASE, IXP425_MCU_HWBASE, IXP425_MCU_SIZE, },
139 /* PCI Memory Space */
140 { IXP425_PCI_MEM_VBASE, IXP425_PCI_MEM_HWBASE, IXP425_PCI_MEM_SIZE, },
142 /* Q-Mgr Memory Space */
143 { IXP425_QMGR_VBASE, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, },
148 /* Static device mappings. */
/*
 * IXP435 (Cambria) static device mappings, consumed by
 * devmap_bootstrap() in initarm().  Largely mirrors ixp425_devmap but
 * adds the IXP435 USB controllers and Cambria GPS/RS485 windows; the
 * memory controller PA differs (IXP435_MCU_HWBASE) but is mapped at
 * the same VA as on the IXP425.
 * NOTE(review): the terminating all-zero sentinel entry and closing
 * brace are not visible in this elided view -- confirm they follow.
 */
149 static const struct devmap_entry ixp435_devmap[] = {
150 /* Physical/Virtual address for I/O space */
151 { IXP425_IO_VBASE, IXP425_IO_HWBASE, IXP425_IO_SIZE, },
/* Expansion Bus */
153 { IXP425_EXP_VBASE, IXP425_EXP_HWBASE, IXP425_EXP_SIZE, },
155 /* IXP425 PCI Configuration */
156 { IXP425_PCI_VBASE, IXP425_PCI_HWBASE, IXP425_PCI_SIZE, },
158 /* DDRII Controller NB: mapped same place as IXP425 */
159 { IXP425_MCU_VBASE, IXP435_MCU_HWBASE, IXP425_MCU_SIZE, },
161 /* PCI Memory Space */
162 { IXP425_PCI_MEM_VBASE, IXP425_PCI_MEM_HWBASE, IXP425_PCI_MEM_SIZE, },
164 /* Q-Mgr Memory Space */
165 { IXP425_QMGR_VBASE, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, },
167 /* CFI Flash on the Expansion Bus */
168 { IXP425_EXP_BUS_CS0_VBASE, IXP425_EXP_BUS_CS0_HWBASE,
169 IXP425_EXP_BUS_CS0_SIZE, },
171 /* USB1 Memory Space */
172 { IXP435_USB1_VBASE, IXP435_USB1_HWBASE, IXP435_USB1_SIZE, },
173 /* USB2 Memory Space */
174 { IXP435_USB2_VBASE, IXP435_USB2_HWBASE, IXP435_USB2_SIZE, },
176 /* GPS Memory Space */
177 { CAMBRIA_GPS_VBASE, CAMBRIA_GPS_HWBASE, CAMBRIA_GPS_SIZE, },
179 /* RS485 Memory Space */
180 { CAMBRIA_RS485_VBASE, CAMBRIA_RS485_HWBASE, CAMBRIA_RS485_SIZE, },
185 extern vm_offset_t xscale_cache_clean_addr;
/*
 * initarm() -- early machine-dependent bootstrap for the IXP425/IXP435
 * (XScale) port, entered from locore with a flat PA=VA mapping.
 * Builds the initial L1/L2 page tables, carves stacks and per-boot
 * pages out of physical memory below the kernel load address, switches
 * to the new translation table, sizes RAM via the memory controller,
 * and registers the physical memory map with the VM layer.  Returns
 * the initial SVC-mode stack pointer for proc0.
 *
 * NOTE(review): this view of the file is elided (braces, local
 * declarations and the cpu_is_ixp43x() conditionals are not shown).
 * Statement order here is load-bearing boot sequencing -- do not
 * reorder.
 */
188 initarm(struct arm_boot_params *abp)
/* Round (a) up to the next multiple of (b); (b) must be a power of two. */
190 #define next_chunk2(a,b) (((a) + (b)) &~ ((b)-1))
191 #define next_page(a) next_chunk2(a,PAGE_SIZE)
192 struct pv_addr kernel_l1pt;
193 struct pv_addr dpcpu;
196 vm_offset_t freemempos;
197 vm_offset_t freemem_pt;
198 vm_offset_t afterkern;
199 vm_offset_t freemem_after;
200 vm_offset_t lastaddr;
203 /* kernel text starts where we were loaded at boot */
204 #define KERNEL_TEXT_OFF (abp->abp_physaddr - PHYSADDR)
205 #define KERNEL_TEXT_BASE (KERNBASE + KERNEL_TEXT_OFF)
206 #define KERNEL_TEXT_PHYS (PHYSADDR + KERNEL_TEXT_OFF)
/* lastaddr = first free PA past the loaded kernel image + boot metadata. */
208 lastaddr = parse_boot_param(abp);
209 arm_physmem_kernaddr = abp->abp_physaddr;
210 set_cpufuncs(); /* NB: sets cputype */
211 pcpu_init(pcpup, 0, sizeof(struct pcpu));
212 PCPU_SET(curthread, &thread0);
214 init_static_kenv(NULL, 0);
216 /* Do basic tuning, hz etc */
220 * We allocate memory downwards from where we were loaded
221 * by RedBoot; first the L1 page table, then NUM_KERNEL_PTS
222 * entries in the L2 page table. Past that we re-align the
223 * allocation boundary so later data structures (stacks, etc)
224 * can be mapped with different attributes (write-back vs
225 * write-through). Note this leaves a gap for expansion
226 * (or might be repurposed).
228 freemempos = abp->abp_physaddr;
230 /* macros to simplify initial memory allocation */
/* alloc_pages(): bump freemempos DOWN by np pages and zero the block. */
231 #define alloc_pages(var, np) do { \
232 freemempos -= (np * PAGE_SIZE); \
233 (var) = freemempos; \
234 /* NB: this works because locore maps PA=VA */ \
235 memset((char *)(var), 0, ((np) * PAGE_SIZE)); \
/* valloc_pages(): alloc_pages() plus the matching kernel VA. */
237 #define valloc_pages(var, np) do { \
238 alloc_pages((var).pv_pa, (np)); \
239 (var).pv_va = (var).pv_pa + (KERNVIRTADDR - abp->abp_physaddr); \
242 /* force L1 page table alignment */
/* The ARM L1 table must sit on an L1_TABLE_SIZE (16KB) boundary. */
243 while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
244 freemempos -= PAGE_SIZE;
245 /* allocate contiguous L1 page table */
246 valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
247 /* now allocate L2 page tables; they are linked to L1 below */
/*
 * L2 tables are L2_TABLE_SIZE_REAL (1KB) each, so four of them share
 * one 4KB page: allocate a fresh page every 4th index, otherwise point
 * into the page allocated for the previous multiple-of-4 index.
 */
248 for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
249 if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
250 valloc_pages(kernel_pt_table[loop],
251 L2_TABLE_SIZE / PAGE_SIZE);
253 kernel_pt_table[loop].pv_pa = freemempos +
254 (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
256 kernel_pt_table[loop].pv_va =
257 kernel_pt_table[loop].pv_pa +
258 (KERNVIRTADDR - abp->abp_physaddr);
261 freemem_pt = freemempos; /* base of allocated pt's */
264 * Re-align allocation boundary so we can map the area
265 * write-back instead of write-through for the stacks and
266 * related structures allocated below.
268 freemempos = PHYSADDR + 0x100000;
270 * Allocate a page for the system page mapped to V0x00000000
271 * This page will just contain the system vectors and can be
272 * shared by all processes.
274 valloc_pages(systempage, 1);
276 /* Allocate dynamic per-cpu area. */
277 valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
278 dpcpu_init((void *)dpcpu.pv_va, 0);
280 /* Allocate stacks for all modes */
281 valloc_pages(irqstack, IRQ_STACK_SIZE);
282 valloc_pages(abtstack, ABT_STACK_SIZE);
283 valloc_pages(undstack, UND_STACK_SIZE);
284 valloc_pages(kernelstack, kstack_pages);
/* minidataclean gets no VA mapping here; it is mapped at 'afterkern' below. */
285 alloc_pages(minidataclean.pv_pa, 1);
286 valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
289 * Now construct the L1 page table. First map the L2
290 * page tables into the L1 so we can replace L1 mappings
291 * later on if necessary
/* PA=VA still holds, so we can write the table through its VA alias. */
293 l1pagetable = kernel_l1pt.pv_va;
295 /* Map the L2 pages tables in the L1 page table */
296 pmap_link_l2pt(l1pagetable, rounddown2(ARM_VECTORS_HIGH, 0x00100000),
297 &kernel_pt_table[KERNEL_PT_SYS]);
298 pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE,
299 &kernel_pt_table[KERNEL_PT_IO]);
300 pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE,
301 &kernel_pt_table[KERNEL_PT_IO + 1]);
302 pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE,
303 &kernel_pt_table[KERNEL_PT_IO + 2]);
304 pmap_link_l2pt(l1pagetable, KERNBASE,
305 &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
/* First MB (page tables etc.) cacheable ... */
306 pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, 0x100000,
307 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/* ... second MB with page-table attributes (write-through on XScale). */
308 pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, PHYSADDR + 0x100000,
309 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
/* Map the kernel image itself, rounded up to a 1MB section boundary. */
310 pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_PHYS,
311 next_chunk2(((uint32_t)lastaddr) - KERNEL_TEXT_BASE, L1_S_SIZE),
312 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
313 freemem_after = next_page((int)lastaddr);
314 afterkern = round_page(next_chunk2((vm_offset_t)lastaddr, L1_S_SIZE));
/* Link the post-kernel L2 tables, one per MB of KVA after the image. */
315 for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
316 pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
317 &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
319 pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
320 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
323 /* Map the Mini-Data cache clean area. */
324 xscale_setup_minidata(l1pagetable, afterkern,
325 minidataclean.pv_pa);
327 /* Map the vector page. */
328 pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
329 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
 * Establish the static device mappings; the IXP435/IXP425 choice is
 * made by a cpu_is_ixp43x() test elided from this view -- confirm.
 */
331 devmap_bootstrap(l1pagetable, ixp435_devmap);
333 devmap_bootstrap(l1pagetable, ixp425_devmap);
335 * Give the XScale global cache clean code an appropriately
336 * sized chunk of unmapped VA space starting at 0xff000000
337 * (our device mappings end before this address).
339 xscale_cache_clean_addr = 0xff000000U;
/*
 * Switch to the freshly built L1 table.  Domains are opened up for the
 * switch, then restricted to the kernel domain only afterwards.
 */
341 cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
342 cpu_setttb(kernel_l1pt.pv_pa);
344 cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
347 * Pages were allocated during the secondary bootstrap for the
348 * stacks for different CPU modes.
349 * We must now set the r13 registers in the different CPU modes to
350 * point to these stacks.
351 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
352 * of the stack memory.
357 * We must now clean the cache again....
358 * Cleaning may be done by reading new data to displace any
359 * dirty data in the cache. This will have happened in cpu_setttb()
360 * but since we are boot strapping the addresses used for the read
361 * may have just been remapped and thus the cache could be out
362 * of sync. A re-clean after the switch will cure this.
363 * After booting there are no gross relocations of the kernel thus
364 * this problem will not occur after initarm().
366 cpu_idcache_wbinv_all();
369 /* ready to setup the console (XXX move earlier if possible) */
372 * Fetch the RAM size from the MCU registers. The
373 * expansion bus was mapped above so we can now read 'em.
/* IXP435 reads DDR-II controller, IXP425 reads SDRAM controller. */
376 memsize = ixp435_ddram_size();
378 memsize = ixp425_sdram_size();
382 init_proc0(kernelstack.pv_va);
384 arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
386 pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
387 vm_max_kernel_address = 0xe0000000;
388 pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
389 msgbufp = (void*)msgbufpv.pv_va;
390 msgbufinit(msgbufp, msgbufsize);
394 * Add the physical ram we have available.
396 * Exclude the kernel, and all the things we allocated which immediately
397 * follow the kernel, from the VM allocation pool but not from crash
398 * dumps. virtual_avail is a global variable which tracks the kva we've
399 * "allocated" while setting up pmaps.
401 * Prepare the list of physical memory available to the vm subsystem.
403 arm_physmem_hardware_region(PHYSADDR, memsize);
/* Exclude the page-table region below the load address... */
404 arm_physmem_exclude_region(freemem_pt, abp->abp_physaddr -
405 freemem_pt, EXFLAG_NOALLOC);
/* ...the stacks/msgbuf region allocated down from PHYSADDR+1MB... */
406 arm_physmem_exclude_region(freemempos, abp->abp_physaddr - 0x100000 -
407 freemempos, EXFLAG_NOALLOC);
/* ...and the kernel image plus everything mapped after it. */
408 arm_physmem_exclude_region(abp->abp_physaddr,
409 virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC);
410 arm_physmem_init_kernel_globals();
412 init_param2(physmem);
/* Hand back proc0's initial SVC stack pointer (top of stack minus pcb). */
415 return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
416 sizeof(struct pcb)));