[FreeBSD/FreeBSD.git] sys/arm64/arm64/machdep.c
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <machine/acpica_machdep.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;       /* The minimum D cache line size */
int64_t icache_line_size;       /* The minimum I cache line size */
int64_t idcache_line_size;      /* The minimum cache line size */
int64_t dczva_line_size;        /* The size of cache line the dc zva zeroes */
int has_pan;

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;

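/*
 * Check ID_AA64MMFR1_EL1 for Privileged Access Never (PAN) support so that
 * pan_enable() can later set PSTATE.PAN on CPUs that implement it.
 */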
static void
pan_setup(void)
{
        uint64_t id_aa64mfr1;

        id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
        if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
                has_pan = 1;
}

void
pan_enable(void)
{

        /*
         * The LLVM integrated assembler doesn't understand the PAN
         * PSTATE field. Because of this we need to manually create
         * the instruction in an asm block. This is equivalent to:
         * msr pan, #1
         *
         * This sets the PAN bit, stopping the kernel from accessing
         * memory when userspace can also access it unless the kernel
         * uses the userspace load/store instructions.
         */
        if (has_pan) {
                WRITE_SPECIALREG(sctlr_el1,
                    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
                __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
        }
}

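/*
 * Late machine-dependent startup, run via SYSINIT: install the undefined
 * instruction handlers, identify the CPU and set up the kernel VM submaps
 * and the buffer cache.
 */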
static void
cpu_startup(void *dummy)
{

        undef_init();
        identify_cpu();

        vm_ksubmap_init(&kmi);
        bufinit();
        vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

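/*
 * fill_regs()/set_regs() copy the general purpose register state between a
 * thread's trapframe and struct reg, for ptrace(2) and core dumps.
 */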
int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        regs->sp = frame->tf_sp;
        regs->lr = frame->tf_lr;
        regs->elr = frame->tf_elr;
        regs->spsr = frame->tf_spsr;

        memcpy(regs->x, frame->tf_x, sizeof(regs->x));

        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        frame->tf_sp = regs->sp;
        frame->tf_lr = regs->lr;
        frame->tf_elr = regs->elr;
        frame->tf_spsr = regs->spsr;

        memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

        return (0);
}

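/*
 * fill_fpregs()/set_fpregs() do the same for the VFP/SIMD state; the live
 * register contents are flushed to the PCB first if this thread has been
 * using the FPU.
 */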
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                if (td == curthread)
                        vfp_save_state(td, pcb);

                KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
                    ("Called fill_fpregs while the kernel is using the VFP"));
                memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
                    sizeof(regs->fp_q));
                regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
                regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
        } else
#endif
                memset(regs->fp_q, 0, sizeof(regs->fp_q));
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
            ("Called set_fpregs while the kernel is using the VFP"));
        memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
        pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
        pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
        return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: fill_dbregs");
        return (EDOOFUS);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: set_dbregs");
        return (EDOOFUS);
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

        printf("ARM64TODO: ptrace_set_pc");
        return (EDOOFUS);
}

int
ptrace_single_step(struct thread *td)
{

        td->td_frame->tf_spsr |= PSR_SS;
        td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

        td->td_frame->tf_spsr &= ~PSR_SS;
        td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
        return (0);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(struct trapframe));

        /*
         * We need to set x0 for init as it doesn't call
         * cpu_set_syscall_retval to copy the value. We also
         * need to set td_retval for the cases where we do.
         */
        tf->tf_x[0] = td->td_retval[0] = stack;
        tf->tf_sp = STACKALIGN(stack);
        tf->tf_lr = imgp->entry_addr;
        tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

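/*
 * get_mcontext()/set_mcontext() copy the general purpose registers between
 * a trapframe and an mcontext, for signal delivery and sigreturn(2).
 */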
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;

        if (clear_ret & GET_MC_CLEAR_RET) {
                mcp->mc_gpregs.gp_x[0] = 0;
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
        } else {
                mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
        }

        memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
            sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

        mcp->mc_gpregs.gp_sp = tf->tf_sp;
        mcp->mc_gpregs.gp_lr = tf->tf_lr;
        mcp->mc_gpregs.gp_elr = tf->tf_elr;

        return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        struct trapframe *tf = td->td_frame;

        memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

        tf->tf_sp = mcp->mc_gpregs.gp_sp;
        tf->tf_lr = mcp->mc_gpregs.gp_lr;
        tf->tf_elr = mcp->mc_gpregs.gp_elr;
        tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

        return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        curpcb = curthread->td_pcb;

        if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, curpcb);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called get_fpcontext while the kernel is using the VFP"));
                KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
                    ("Non-userspace FPU flags set in get_fpcontext"));
                memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
                    sizeof(mcp->mc_fpregs));
                mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
                mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
                mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
                mcp->mc_flags |= _MC_FP_VALID;
        }

        critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
                curpcb = curthread->td_pcb;

                /*
                 * Discard any vfp state for the current thread, we
                 * are about to override it.
                 */
                vfp_discard(td);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called set_fpcontext while the kernel is using the VFP"));
                memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
                    sizeof(mcp->mc_fpregs));
                curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
                curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
                curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
        }

        critical_exit();
#endif
}

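/*
 * Idle the CPU with a WFI, using a DSB so prior stores are visible before
 * waiting; the idle tick is stopped around the wait when not busy.
 */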
void
cpu_idle(int busy)
{

        spinlock_enter();
        if (!busy)
                cpu_idleclock();
        if (!sched_runnable())
                __asm __volatile(
                    "dsb sy \n"
                    "wfi    \n");
        if (!busy)
                cpu_activeclock();
        spinlock_exit();
}

void
cpu_halt(void)
{

        /* We should have shutdown by now, if not enter a low power sleep */
        intr_disable();
        while (1) {
                __asm __volatile("wfi");
        }
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        /* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
        struct pcpu *pc;

        pc = pcpu_find(cpu_id);
        if (pc == NULL || rate == NULL)
                return (EINVAL);

        if (pc->pc_clock == 0)
                return (EOPNOTSUPP);

        *rate = pc->pc_clock;
        return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_acpi_id = 0xffffffff;
}

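/*
 * MD spinlock enter/exit: the outermost spinlock disables interrupts and
 * saves the DAIF state, which is restored once the per-thread nesting count
 * drops back to zero.
 */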
void
spinlock_enter(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                daif = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_daif = daif;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        critical_exit();
        daif = td->td_md.md_saved_daif;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(daif);
}

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
        ucontext_t *sigcntxp;
};
#endif

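/*
 * Return from a signal handler: copy in the saved ucontext and refuse any
 * register state that would switch out of EL0 or mask exceptions.
 */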
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
        ucontext_t uc;
        uint32_t spsr;

        if (uap == NULL)
                return (EFAULT);
        if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
                return (EFAULT);

        spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
        if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
            (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
                return (EINVAL);

        set_mcontext(td, &uc.uc_mcontext);
        set_fpcontext(td, &uc.uc_mcontext);

        /* Restore signal mask. */
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        int i;

        for (i = 0; i < PCB_LR; i++)
                pcb->pcb_x[i] = tf->tf_x[i];

        pcb->pcb_x[PCB_LR] = tf->tf_lr;
        pcb->pcb_pc = tf->tf_elr;
        pcb->pcb_sp = tf->tf_sp;
}

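/*
 * Deliver a signal: build a sigframe on the user stack (or the alternate
 * signal stack), copy it out and point the trapframe at the handler, with
 * the link register aimed at the signal trampoline for the return path.
 */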
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        struct sysentvec *sysent;
        int code, onstack, sig;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);

        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);

        tf = td->td_frame;
        onstack = sigonstack(tf->tf_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else {
                fp = (struct sigframe *)td->td_frame->tf_sp;
        }

        /* Make room, keeping the stack aligned */
        fp--;
        fp = (struct sigframe *)STACKALIGN(fp);

        /* Fill in the frame to copy out */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
        get_fpcontext(td, &frame.sf_uc.uc_mcontext);
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
            ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        tf->tf_x[0] = sig;
        tf->tf_x[1] = (register_t)&fp->sf_si;
        tf->tf_x[2] = (register_t)&fp->sf_uc;

        tf->tf_elr = (register_t)catcher;
        tf->tf_sp = (register_t)fp;
        sysent = p->p_sysent;
        if (sysent->sv_sigcode_base != 0)
                tf->tf_lr = (register_t)sysent->sv_sigcode_base;
        else
                tf->tf_lr = (register_t)(sysent->sv_psstrings -
                    *(sysent->sv_szsigcode));

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
            tf->tf_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

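/* Wire up proc0/thread0 with the boot-time kernel stack, PCB and trapframe. */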
static void
init_proc0(vm_offset_t kstack)
{
        struct pcpu *pcpup = &__pcpu[0];

        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
        thread0.td_pcb->pcb_fpflags = 0;
        thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
        thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
        uint32_t type;
        uint64_t phys_start;
        uint64_t virt_start;
        uint64_t num_pages;
        uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

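/*
 * Insert a memory range into physmap[], which holds sorted, non-overlapping
 * (start, end) pairs; adjacent ranges are merged. Returns 0 once the table
 * is full, 1 otherwise.
 */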
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        u_int i, insert_idx, _physmap_idx;

        _physmap_idx = *physmap_idxp;

        if (length == 0)
                return (1);

        /*
         * Find insertion point while checking for overlap.  Start off by
         * assuming the new entry will be added to the end.
         */
        insert_idx = _physmap_idx;
        for (i = 0; i <= _physmap_idx; i += 2) {
                if (base < physmap[i + 1]) {
                        if (base + length <= physmap[i]) {
                                insert_idx = i;
                                break;
                        }
                        if (boothowto & RB_VERBOSE)
                                printf(
                    "Overlapping memory regions, ignoring second region\n");
                        return (1);
                }
        }

        /* See if we can prepend to the next entry. */
        if (insert_idx <= _physmap_idx &&
            base + length == physmap[insert_idx]) {
                physmap[insert_idx] = base;
                return (1);
        }

        /* See if we can append to the previous entry. */
        if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
                physmap[insert_idx - 1] += length;
                return (1);
        }

        _physmap_idx += 2;
        *physmap_idxp = _physmap_idx;
        if (_physmap_idx == PHYSMAP_SIZE) {
                printf(
                "Too many segments in the physical address map, giving up\n");
                return (0);
        }

        /*
         * Move the last 'N' entries down to make room for the new
         * entry if needed.
         */
        for (i = _physmap_idx; i > insert_idx; i -= 2) {
                physmap[i] = physmap[i - 2];
                physmap[i + 1] = physmap[i - 1];
        }

        /* Insert the new entry. */
        physmap[insert_idx] = base;
        physmap[insert_idx + 1] = base + length;
        return (1);
}

#ifdef FDT
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{

        for (int i = 0; i < mrcnt; i++) {
                if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
                    physmap_idxp))
                        break;
        }
}
#endif

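/*
 * Walk the UEFI memory map handed over by the loader, optionally printing
 * it when booting verbose, and add the usable entry types to physmap[].
 */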
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        struct efi_md *map, *p;
        const char *type;
        size_t efisz;
        int ndesc, i;

        static const char *types[] = {
                "Reserved",
                "LoaderCode",
                "LoaderData",
                "BootServicesCode",
                "BootServicesData",
                "RuntimeServicesCode",
                "RuntimeServicesData",
                "ConventionalMemory",
                "UnusableMemory",
                "ACPIReclaimMemory",
                "ACPIMemoryNVS",
                "MemoryMappedIO",
                "MemoryMappedIOPortSpace",
                "PalCode",
                "PersistentMemory"
        };

        /*
         * Memory map data provided by UEFI via the GetMemoryMap
         * Boot Services API.
         */
        efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
        map = (struct efi_md *)((uint8_t *)efihdr + efisz);

        if (efihdr->descriptor_size == 0)
                return;
        ndesc = efihdr->memory_size / efihdr->descriptor_size;

        if (boothowto & RB_VERBOSE)
                printf("%23s %12s %12s %8s %4s\n",
                    "Type", "Physical", "Virtual", "#Pages", "Attr");

        for (i = 0, p = map; i < ndesc; i++,
            p = efi_next_descriptor(p, efihdr->descriptor_size)) {
                if (boothowto & RB_VERBOSE) {
                        if (p->md_type < nitems(types))
                                type = types[p->md_type];
                        else
                                type = "<INVALID>";
                        printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                            p->md_virt, p->md_pages);
                        if (p->md_attr & EFI_MD_ATTR_UC)
                                printf("UC ");
                        if (p->md_attr & EFI_MD_ATTR_WC)
                                printf("WC ");
                        if (p->md_attr & EFI_MD_ATTR_WT)
                                printf("WT ");
                        if (p->md_attr & EFI_MD_ATTR_WB)
                                printf("WB ");
                        if (p->md_attr & EFI_MD_ATTR_UCE)
                                printf("UCE ");
                        if (p->md_attr & EFI_MD_ATTR_WP)
                                printf("WP ");
                        if (p->md_attr & EFI_MD_ATTR_RP)
                                printf("RP ");
                        if (p->md_attr & EFI_MD_ATTR_XP)
                                printf("XP ");
                        if (p->md_attr & EFI_MD_ATTR_NV)
                                printf("NV ");
                        if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
                                printf("MORE_RELIABLE ");
                        if (p->md_attr & EFI_MD_ATTR_RO)
                                printf("RO ");
                        if (p->md_attr & EFI_MD_ATTR_RT)
                                printf("RUNTIME");
                        printf("\n");
                }

                switch (p->md_type) {
                case EFI_MD_TYPE_CODE:
                case EFI_MD_TYPE_DATA:
                case EFI_MD_TYPE_BS_CODE:
                case EFI_MD_TYPE_BS_DATA:
                case EFI_MD_TYPE_FREE:
                        /*
                         * We're allowed to use any entry with these types.
                         */
                        break;
                default:
                        continue;
                }

                if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
                    physmap, physmap_idxp))
                        break;
        }
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
        vm_offset_t dtbp;

        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
        if (dtbp == (vm_offset_t)NULL) {
                printf("ERROR loading DTB\n");
                return;
        }

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");
}
#endif

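/*
 * Pick the bus enumeration method (FDT or ACPI). An explicit kern.cfg.order
 * tunable is honoured when the requested method is available; otherwise FDT
 * is preferred over ACPI. Returns false when the tunable asked for something
 * that could not be selected.
 */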
static bool
bus_probe(void)
{
        bool has_acpi, has_fdt;
        char *order, *env;

        has_acpi = has_fdt = false;

#ifdef FDT
        has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
        has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
#endif

        env = kern_getenv("kern.cfg.order");
        if (env != NULL) {
                order = env;
                while (order != NULL) {
                        if (has_acpi &&
                            strncmp(order, "acpi", 4) == 0 &&
                            (order[4] == ',' || order[4] == '\0')) {
                                arm64_bus_method = ARM64_BUS_ACPI;
                                break;
                        }
                        if (has_fdt &&
                            strncmp(order, "fdt", 3) == 0 &&
                            (order[3] == ',' || order[3] == '\0')) {
                                arm64_bus_method = ARM64_BUS_FDT;
                                break;
                        }
                        order = strchr(order, ',');
                        /* Step past the comma so the next token is examined */
                        if (order != NULL)
                                order++;
                }
                freeenv(env);

                /* If we set the bus method it is valid */
                if (arm64_bus_method != ARM64_BUS_NONE)
                        return (true);
        }
        /* If no order or an invalid order was set use the default */
        if (arm64_bus_method == ARM64_BUS_NONE) {
                if (has_fdt)
                        arm64_bus_method = ARM64_BUS_FDT;
                else if (has_acpi)
                        arm64_bus_method = ARM64_BUS_ACPI;
        }

        /*
         * If no option was set the default is valid, otherwise we are
         * setting one to get cninit() working, then calling panic to tell
         * the user about the invalid bus setup.
         */
        return (env == NULL);
}

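/*
 * Read the cache line sizes from CTR_EL0 and the DC ZVA block size from
 * DCZID_EL0; when DC ZVA is permitted, switch pagezero to the cache-zeroing
 * implementation.
 */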
static void
cache_setup(void)
{
        int dcache_line_shift, icache_line_shift, dczva_line_shift;
        uint32_t ctr_el0;
        uint32_t dczid_el0;

        ctr_el0 = READ_SPECIALREG(ctr_el0);

        /* Read the log2 words in each D cache line */
        dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
        /* Get the D cache line size */
        dcache_line_size = sizeof(int) << dcache_line_shift;

        /* And the same for the I cache */
        icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
        icache_line_size = sizeof(int) << icache_line_shift;

        idcache_line_size = MIN(dcache_line_size, icache_line_size);

        dczid_el0 = READ_SPECIALREG(dczid_el0);

        /* Check if dc zva is not prohibited */
        if (dczid_el0 & DCZID_DZP)
                dczva_line_size = 0;
        else {
                /* Same as with above calculations */
                dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
                dczva_line_size = sizeof(int) << dczva_line_shift;

                /* Change pagezero function */
                pagezero = pagezero_cache;
        }
}

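/*
 * Machine-dependent early initialisation, called from the start-up code with
 * the loader-provided bootparams: parse the module metadata and memory map,
 * bootstrap pmap and the console, and set up proc0 before the MI kernel
 * takes over.
 */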
void
initarm(struct arm64_bootparams *abp)
{
        struct efi_map_header *efihdr;
        struct pcpu *pcpup;
#ifdef FDT
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        int mem_regions_sz;
#endif
        vm_offset_t lastaddr;
        caddr_t kmdp;
        vm_paddr_t mem_len;
        bool valid;
        int i;

        /* Set the module data location */
        preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

        /* Find the kernel address */
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");

        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
        init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
        try_load_dtb(kmdp);
#endif

        /* Find the address to start allocating from */
        lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

        /* Load the physical memory ranges */
        physmap_idx = 0;
        efihdr = (struct efi_map_header *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL)
                add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
        else {
                /* Grab physical memory regions information from device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
                    NULL) != 0)
                        panic("Cannot get physical memory regions");
                add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
                    &physmap_idx);
        }
#endif

        /* Build dump_avail from the memory map and total up the memory */
        mem_len = 0;
        for (i = 0; i < physmap_idx; i += 2) {
                dump_avail[i] = physmap[i];
                dump_avail[i + 1] = physmap[i + 1];
                mem_len += physmap[i + 1] - physmap[i];
        }
        dump_avail[i] = 0;
        dump_avail[i + 1] = 0;

        /* Set the pcpu data, this is needed by pmap_bootstrap */
        pcpup = &__pcpu[0];
        pcpu_init(pcpup, 0, sizeof(struct pcpu));

        /*
         * Set the pcpu pointer with a backup in tpidr_el1 to be
         * loaded when entering the kernel from userland.
         */
        __asm __volatile(
            "mov x18, %0 \n"
            "msr tpidr_el1, %0" :: "r"(pcpup));

        PCPU_SET(curthread, &thread0);

        /* Do basic tuning, hz etc */
        init_param1();

        cache_setup();
        pan_setup();

        /* Bootstrap enough of pmap to enter the kernel proper */
        pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
            KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

        devmap_bootstrap(0, NULL);

        valid = bus_probe();

        cninit();

        if (!valid)
                panic("Invalid bus configuration: %s",
                    kern_getenv("kern.cfg.order"));

        init_proc0(abp->kern_stack);
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();
        init_param2(physmem);

        dbg_init();
        kdb_init();
        pan_enable();

        early_boot = 0;
}

void
dbg_init(void)
{

        /* Clear OS lock */
        WRITE_SPECIALREG(OSLAR_EL1, 0);

        /* This permits DDB to use debug registers for watchpoints. */
        dbg_monitor_init();

        /* TODO: Eventually will need to initialize debug registers here. */
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg)  \
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

        PRINT_REG(actlr_el1);
        PRINT_REG(afsr0_el1);
        PRINT_REG(afsr1_el1);
        PRINT_REG(aidr_el1);
        PRINT_REG(amair_el1);
        PRINT_REG(ccsidr_el1);
        PRINT_REG(clidr_el1);
        PRINT_REG(contextidr_el1);
        PRINT_REG(cpacr_el1);
        PRINT_REG(csselr_el1);
        PRINT_REG(ctr_el0);
        PRINT_REG(currentel);
        PRINT_REG(daif);
        PRINT_REG(dczid_el0);
        PRINT_REG(elr_el1);
        PRINT_REG(esr_el1);
        PRINT_REG(far_el1);
#if 0
        /* ARM64TODO: Enable VFP before reading floating-point registers */
        PRINT_REG(fpcr);
        PRINT_REG(fpsr);
#endif
        PRINT_REG(id_aa64afr0_el1);
        PRINT_REG(id_aa64afr1_el1);
        PRINT_REG(id_aa64dfr0_el1);
        PRINT_REG(id_aa64dfr1_el1);
        PRINT_REG(id_aa64isar0_el1);
        PRINT_REG(id_aa64isar1_el1);
        PRINT_REG(id_aa64pfr0_el1);
        PRINT_REG(id_aa64pfr1_el1);
        PRINT_REG(id_afr0_el1);
        PRINT_REG(id_dfr0_el1);
        PRINT_REG(id_isar0_el1);
        PRINT_REG(id_isar1_el1);
        PRINT_REG(id_isar2_el1);
        PRINT_REG(id_isar3_el1);
        PRINT_REG(id_isar4_el1);
        PRINT_REG(id_isar5_el1);
        PRINT_REG(id_mmfr0_el1);
        PRINT_REG(id_mmfr1_el1);
        PRINT_REG(id_mmfr2_el1);
        PRINT_REG(id_mmfr3_el1);
#if 0
        /* Missing from llvm */
        PRINT_REG(id_mmfr4_el1);
#endif
        PRINT_REG(id_pfr0_el1);
        PRINT_REG(id_pfr1_el1);
        PRINT_REG(isr_el1);
        PRINT_REG(mair_el1);
        PRINT_REG(midr_el1);
        PRINT_REG(mpidr_el1);
        PRINT_REG(mvfr0_el1);
        PRINT_REG(mvfr1_el1);
        PRINT_REG(mvfr2_el1);
        PRINT_REG(revidr_el1);
        PRINT_REG(sctlr_el1);
        PRINT_REG(sp_el0);
        PRINT_REG(spsel);
        PRINT_REG(spsr_el1);
        PRINT_REG(tcr_el1);
        PRINT_REG(tpidr_el0);
        PRINT_REG(tpidr_el1);
        PRINT_REG(tpidrro_el0);
        PRINT_REG(ttbr0_el1);
        PRINT_REG(ttbr1_el1);
        PRINT_REG(vbar_el1);
#undef PRINT_REG
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
        uint64_t phys;

        if (have_addr) {
                phys = arm64_address_translate_s1e1r(addr);
                db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e1w(addr);
                db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0r(addr);
                db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0w(addr);
                db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
        } else
                db_printf("show vtop <virt_addr>\n");
}
#endif