/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <machine/acpica_machdep.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

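/* Cache line geometry, discovered by cache_setup() from CTR_EL0 and DCZID_EL0. */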
int64_t dcache_line_size;       /* The minimum D cache line size */
int64_t icache_line_size;       /* The minimum I cache line size */
int64_t idcache_line_size;      /* The minimum cache line size */
int64_t dczva_line_size;        /* The size of cache line the dc zva zeroes */

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;
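/* cache_setup() switches this to pagezero_cache when "dc zva" is usable. */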

static void
cpu_startup(void *dummy)
{

        identify_cpu();

        vm_ksubmap_init(&kmi);
        bufinit();
        vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

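/*
 * Machine-dependent register access routines, used by ptrace(2) requests
 * such as PT_GETREGS/PT_SETREGS and by core dump generation.
 */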
int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        regs->sp = frame->tf_sp;
        regs->lr = frame->tf_lr;
        regs->elr = frame->tf_elr;
        regs->spsr = frame->tf_spsr;

        memcpy(regs->x, frame->tf_x, sizeof(regs->x));

        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        frame->tf_sp = regs->sp;
        frame->tf_lr = regs->lr;
        frame->tf_elr = regs->elr;
        frame->tf_spsr = regs->spsr;

        memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

        return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, pcb);

                KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
                    ("Called fill_fpregs while the kernel is using the VFP"));
                memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
                    sizeof(regs->fp_q));
                regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
                regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
        } else
#endif
                memset(regs->fp_q, 0, sizeof(regs->fp_q));
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
            ("Called set_fpregs while the kernel is using the VFP"));
        memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
        pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
        pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
        return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

        panic("ARM64TODO: fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

        panic("ARM64TODO: set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

        panic("ARM64TODO: ptrace_set_pc");
        return (0);
}

int
ptrace_single_step(struct thread *td)
{

        td->td_frame->tf_spsr |= PSR_SS;
        td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

        td->td_frame->tf_spsr &= ~PSR_SS;
        td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
        return (0);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(struct trapframe));

        /*
         * We need to set x0 for init as it doesn't call
         * cpu_set_syscall_retval to copy the value. We also
         * need to set td_retval for the cases where we do.
         */
        tf->tf_x[0] = td->td_retval[0] = stack;
        tf->tf_sp = STACKALIGN(stack);
        tf->tf_lr = imgp->entry_addr;
        tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

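/*
 * get_mcontext()/set_mcontext() copy the general purpose register state
 * between the trapframe and an mcontext_t.  With GET_MC_CLEAR_RET the saved
 * x0 and the carry bit in SPSR are cleared, so the stored context reads back
 * as a successfully returning system call.
 */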
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;

        if (clear_ret & GET_MC_CLEAR_RET) {
                mcp->mc_gpregs.gp_x[0] = 0;
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
        } else {
                mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
        }

        memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
            sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

        mcp->mc_gpregs.gp_sp = tf->tf_sp;
        mcp->mc_gpregs.gp_lr = tf->tf_lr;
        mcp->mc_gpregs.gp_elr = tf->tf_elr;

        return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        struct trapframe *tf = td->td_frame;

        memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

        tf->tf_sp = mcp->mc_gpregs.gp_sp;
        tf->tf_lr = mcp->mc_gpregs.gp_lr;
        tf->tf_elr = mcp->mc_gpregs.gp_elr;
        tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

        return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        curpcb = curthread->td_pcb;

        if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, curpcb);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called get_fpcontext while the kernel is using the VFP"));
                KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
                    ("Non-userspace FPU flags set in get_fpcontext"));
                memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
                    sizeof(mcp->mc_fpregs));
                mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
                mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
                mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
                mcp->mc_flags |= _MC_FP_VALID;
        }

        critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
                curpcb = curthread->td_pcb;

                /*
                 * Discard any vfp state for the current thread, we
                 * are about to override it.
                 */
                vfp_discard(td);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called set_fpcontext while the kernel is using the VFP"));
                memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
                    sizeof(mcp->mc_fpregs));
                curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
                curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
                curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
        }

        critical_exit();
#endif
}

void
cpu_idle(int busy)
{

        spinlock_enter();
        if (!busy)
                cpu_idleclock();
        if (!sched_runnable())
                __asm __volatile(
                    "dsb sy \n"
                    "wfi    \n");
        if (!busy)
                cpu_activeclock();
        spinlock_exit();
}

void
cpu_halt(void)
{

        /* We should have shutdown by now, if not enter a low power sleep */
        intr_disable();
        while (1) {
                __asm __volatile("wfi");
        }
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        /* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
        struct pcpu *pc;

        pc = pcpu_find(cpu_id);
        if (pc == NULL || rate == NULL)
                return (EINVAL);

        if (pc->pc_clock == 0)
                return (EOPNOTSUPP);

        *rate = pc->pc_clock;
        return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_acpi_id = 0xffffffff;
}

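/*
 * spinlock_enter()/spinlock_exit() disable interrupts by saving and
 * restoring the DAIF state, using a per-thread nesting count so that only
 * the outermost exit re-enables interrupts.
 */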
451 spinlock_enter(void)
452 {
453         struct thread *td;
454         register_t daif;
455
456         td = curthread;
457         if (td->td_md.md_spinlock_count == 0) {
458                 daif = intr_disable();
459                 td->td_md.md_spinlock_count = 1;
460                 td->td_md.md_saved_daif = daif;
461         } else
462                 td->td_md.md_spinlock_count++;
463         critical_enter();
464 }
465
466 void
467 spinlock_exit(void)
468 {
469         struct thread *td;
470         register_t daif;
471
472         td = curthread;
473         critical_exit();
474         daif = td->td_md.md_saved_daif;
475         td->td_md.md_spinlock_count--;
476         if (td->td_md.md_spinlock_count == 0)
477                 intr_restore(daif);
478 }
479
480 #ifndef _SYS_SYSPROTO_H_
481 struct sigreturn_args {
482         ucontext_t *ucp;
483 };
484 #endif
485
486 int
487 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
488 {
489         ucontext_t uc;
490         uint32_t spsr;
491
492         if (uap == NULL)
493                 return (EFAULT);
494         if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
495                 return (EFAULT);
496
497         spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
498         if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
499             (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
500                 return (EINVAL); 
501
502         set_mcontext(td, &uc.uc_mcontext);
503         set_fpcontext(td, &uc.uc_mcontext);
504
505         /* Restore signal mask. */
506         kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
507
508         return (EJUSTRETURN);
509 }
510
511 /*
512  * Construct a PCB from a trapframe. This is called from kdb_trap() where
513  * we want to start a backtrace from the function that caused us to enter
514  * the debugger. We have the context in the trapframe, but base the trace
515  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
516  * enough for a backtrace.
517  */
518 void
519 makectx(struct trapframe *tf, struct pcb *pcb)
520 {
521         int i;
522
523         for (i = 0; i < PCB_LR; i++)
524                 pcb->pcb_x[i] = tf->tf_x[i];
525
526         pcb->pcb_x[PCB_LR] = tf->tf_lr;
527         pcb->pcb_pc = tf->tf_elr;
528         pcb->pcb_sp = tf->tf_sp;
529 }
530
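/*
 * Deliver a signal: build a struct sigframe (ucontext plus siginfo) on the
 * user stack, or on the alternate signal stack when one is in use, copy it
 * out, and redirect the thread to the signal trampoline with the handler
 * arguments in x0-x2.
 */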
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        struct sysentvec *sysent;
        int code, onstack, sig;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);

        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);

        tf = td->td_frame;
        onstack = sigonstack(tf->tf_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else {
                fp = (struct sigframe *)td->td_frame->tf_sp;
        }

        /* Make room, keeping the stack aligned */
        fp--;
        fp = (struct sigframe *)STACKALIGN(fp);

        /* Fill in the frame to copy out */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
        get_fpcontext(td, &frame.sf_uc.uc_mcontext);
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        /* Copy the stack state first so the computed ss_flags survive. */
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
            ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        tf->tf_x[0] = sig;
        tf->tf_x[1] = (register_t)&fp->sf_si;
        tf->tf_x[2] = (register_t)&fp->sf_uc;

        tf->tf_elr = (register_t)catcher;
        tf->tf_sp = (register_t)fp;
        sysent = p->p_sysent;
        if (sysent->sv_sigcode_base != 0)
                tf->tf_lr = (register_t)sysent->sv_sigcode_base;
        else
                tf->tf_lr = (register_t)(sysent->sv_psstrings -
                    *(sysent->sv_szsigcode));

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
            tf->tf_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

static void
init_proc0(vm_offset_t kstack)
{
        struct pcpu *pcpup = &__pcpu[0];

        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
        thread0.td_pcb->pcb_fpflags = 0;
        thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
        thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
        uint32_t type;
        uint64_t phys_start;
        uint64_t virt_start;
        uint64_t num_pages;
        uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

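/*
 * Insert the range [base, base + length) into the physmap array of
 * (start, end) pairs, merging it with an adjacent entry where possible.
 * Returns 0 once the table is full, non-zero otherwise.
 */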
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        u_int i, insert_idx, _physmap_idx;

        _physmap_idx = *physmap_idxp;

        if (length == 0)
                return (1);

        /*
         * Find insertion point while checking for overlap.  Start off by
         * assuming the new entry will be added to the end.
         */
        insert_idx = _physmap_idx;
        for (i = 0; i <= _physmap_idx; i += 2) {
                if (base < physmap[i + 1]) {
                        if (base + length <= physmap[i]) {
                                insert_idx = i;
                                break;
                        }
                        if (boothowto & RB_VERBOSE)
                                printf(
                    "Overlapping memory regions, ignoring second region\n");
                        return (1);
                }
        }

        /* See if we can prepend to the next entry. */
        if (insert_idx <= _physmap_idx &&
            base + length == physmap[insert_idx]) {
                physmap[insert_idx] = base;
                return (1);
        }

        /* See if we can append to the previous entry. */
        if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
                physmap[insert_idx - 1] += length;
                return (1);
        }

        _physmap_idx += 2;
        *physmap_idxp = _physmap_idx;
        if (_physmap_idx == PHYSMAP_SIZE) {
                printf(
                "Too many segments in the physical address map, giving up\n");
                return (0);
        }

        /*
         * Move the last 'N' entries down to make room for the new
         * entry if needed.
         */
        for (i = _physmap_idx; i > insert_idx; i -= 2) {
                physmap[i] = physmap[i - 2];
                physmap[i + 1] = physmap[i - 1];
        }

        /* Insert the new entry. */
        physmap[insert_idx] = base;
        physmap[insert_idx + 1] = base + length;
        return (1);
}

#ifdef FDT
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{

        for (int i = 0; i < mrcnt; i++) {
                if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
                    physmap_idxp))
                        break;
        }
}
#endif

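/*
 * Walk the UEFI memory map handed over by the loader, optionally printing
 * each descriptor when booting verbosely, and add the usable entries
 * (loader and boot-services code/data and conventional memory) to physmap.
 */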
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        struct efi_md *map, *p;
        const char *type;
        size_t efisz;
        int ndesc, i;

        static const char *types[] = {
                "Reserved",
                "LoaderCode",
                "LoaderData",
                "BootServicesCode",
                "BootServicesData",
                "RuntimeServicesCode",
                "RuntimeServicesData",
                "ConventionalMemory",
                "UnusableMemory",
                "ACPIReclaimMemory",
                "ACPIMemoryNVS",
                "MemoryMappedIO",
                "MemoryMappedIOPortSpace",
                "PalCode",
                "PersistentMemory"
        };

        /*
         * Memory map data provided by UEFI via the GetMemoryMap
         * Boot Services API.
         */
        efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
        map = (struct efi_md *)((uint8_t *)efihdr + efisz);

        if (efihdr->descriptor_size == 0)
                return;
        ndesc = efihdr->memory_size / efihdr->descriptor_size;

        if (boothowto & RB_VERBOSE)
                printf("%23s %12s %12s %8s %4s\n",
                    "Type", "Physical", "Virtual", "#Pages", "Attr");

        for (i = 0, p = map; i < ndesc; i++,
            p = efi_next_descriptor(p, efihdr->descriptor_size)) {
                if (boothowto & RB_VERBOSE) {
                        if (p->md_type < nitems(types))
                                type = types[p->md_type];
                        else
                                type = "<INVALID>";
                        printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                            p->md_virt, p->md_pages);
                        if (p->md_attr & EFI_MD_ATTR_UC)
                                printf("UC ");
                        if (p->md_attr & EFI_MD_ATTR_WC)
                                printf("WC ");
                        if (p->md_attr & EFI_MD_ATTR_WT)
                                printf("WT ");
                        if (p->md_attr & EFI_MD_ATTR_WB)
                                printf("WB ");
                        if (p->md_attr & EFI_MD_ATTR_UCE)
                                printf("UCE ");
                        if (p->md_attr & EFI_MD_ATTR_WP)
                                printf("WP ");
                        if (p->md_attr & EFI_MD_ATTR_RP)
                                printf("RP ");
                        if (p->md_attr & EFI_MD_ATTR_XP)
                                printf("XP ");
                        if (p->md_attr & EFI_MD_ATTR_NV)
                                printf("NV ");
                        if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
                                printf("MORE_RELIABLE ");
                        if (p->md_attr & EFI_MD_ATTR_RO)
                                printf("RO ");
                        if (p->md_attr & EFI_MD_ATTR_RT)
                                printf("RUNTIME");
                        printf("\n");
                }

                switch (p->md_type) {
                case EFI_MD_TYPE_CODE:
                case EFI_MD_TYPE_DATA:
                case EFI_MD_TYPE_BS_CODE:
                case EFI_MD_TYPE_BS_DATA:
                case EFI_MD_TYPE_FREE:
                        /*
                         * We're allowed to use any entry with these types.
                         */
                        break;
                default:
                        continue;
                }

                if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
                    physmap, physmap_idxp))
                        break;
        }
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
        vm_offset_t dtbp;

        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
        if (dtbp == (vm_offset_t)NULL) {
                printf("ERROR loading DTB\n");
                return;
        }

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");
}
#endif

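/*
 * Decide whether the kernel enumerates devices via FDT or ACPI.  The
 * kern.cfg.order environment variable ("acpi" and/or "fdt", comma separated)
 * states the preference; otherwise FDT is used when a device tree is
 * present, with ACPI as the fallback.
 */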
static bool
bus_probe(void)
{
        bool has_acpi, has_fdt;
        char *order, *env;

        has_acpi = has_fdt = false;

#ifdef FDT
        has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
        has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
#endif

        env = kern_getenv("kern.cfg.order");
        if (env != NULL) {
                order = env;
                while (order != NULL) {
                        if (has_acpi &&
                            strncmp(order, "acpi", 4) == 0 &&
                            (order[4] == ',' || order[4] == '\0')) {
                                arm64_bus_method = ARM64_BUS_ACPI;
                                break;
                        }
                        if (has_fdt &&
                            strncmp(order, "fdt", 3) == 0 &&
                            (order[3] == ',' || order[3] == '\0')) {
                                arm64_bus_method = ARM64_BUS_FDT;
                                break;
                        }
                        order = strchr(order, ',');
                        if (order != NULL)
                                order++;        /* Skip the comma */
                }
                freeenv(env);

                /* If we set the bus method it is valid */
                if (arm64_bus_method != ARM64_BUS_NONE)
                        return (true);
        }
        /* If no order or an invalid order was set use the default */
        if (arm64_bus_method == ARM64_BUS_NONE) {
                if (has_fdt)
                        arm64_bus_method = ARM64_BUS_FDT;
                else if (has_acpi)
                        arm64_bus_method = ARM64_BUS_ACPI;
        }

        /*
         * If no option was set the default is valid, otherwise we are
         * setting one to get cninit() working, then calling panic to tell
         * the user about the invalid bus setup.
         */
        return (env == NULL);
}

static void
cache_setup(void)
{
        int dcache_line_shift, icache_line_shift, dczva_line_shift;
        uint32_t ctr_el0;
        uint32_t dczid_el0;

        ctr_el0 = READ_SPECIALREG(ctr_el0);

        /* Read the log2 words in each D cache line */
        dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
        /* Get the D cache line size */
        dcache_line_size = sizeof(int) << dcache_line_shift;
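        /* e.g. DminLine = 4 (log2 of 4-byte words) gives 4 << 4 = 64 bytes. */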

        /* And the same for the I cache */
        icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
        icache_line_size = sizeof(int) << icache_line_shift;

        idcache_line_size = MIN(dcache_line_size, icache_line_size);

        dczid_el0 = READ_SPECIALREG(dczid_el0);

        /* Check if dc zva is not prohibited */
        if (dczid_el0 & DCZID_DZP)
                dczva_line_size = 0;
        else {
                /* Same as with above calculations */
                dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
                dczva_line_size = sizeof(int) << dczva_line_shift;

                /* Change pagezero function */
                pagezero = pagezero_cache;
        }
}

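/*
 * Early machine-dependent boot: pick up the loader metadata, build the
 * physical memory map from the EFI memory map (or the FDT), set up pcpu,
 * bootstrap pmap and the console, and initialize proc0 before the
 * machine-independent startup takes over.
 */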
void
initarm(struct arm64_bootparams *abp)
{
        struct efi_map_header *efihdr;
        struct pcpu *pcpup;
#ifdef FDT
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        int mem_regions_sz;
#endif
        vm_offset_t lastaddr;
        caddr_t kmdp;
        vm_paddr_t mem_len;
        bool valid;
        int i;

        /* Set the module data location */
        preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

        /* Find the kernel address */
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");

        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
        init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
        try_load_dtb(kmdp);
#endif

        /* Find the address to start allocating from */
        lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

        /* Load the physical memory ranges */
        physmap_idx = 0;
        efihdr = (struct efi_map_header *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL)
                add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
        else {
                /* Grab physical memory regions information from device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
                    NULL) != 0)
                        panic("Cannot get physical memory regions");
                add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
                    &physmap_idx);
        }
#endif

        /* Record the ranges in dump_avail and total up the memory */
        mem_len = 0;
        for (i = 0; i < physmap_idx; i += 2) {
                dump_avail[i] = physmap[i];
                dump_avail[i + 1] = physmap[i + 1];
                mem_len += physmap[i + 1] - physmap[i];
        }
        dump_avail[i] = 0;
        dump_avail[i + 1] = 0;

        /* Set the pcpu data, this is needed by pmap_bootstrap */
        pcpup = &__pcpu[0];
        pcpu_init(pcpup, 0, sizeof(struct pcpu));

        /*
         * Set the pcpu pointer with a backup in tpidr_el1 to be
         * loaded when entering the kernel from userland.
         */
        __asm __volatile(
            "mov x18, %0 \n"
            "msr tpidr_el1, %0" :: "r"(pcpup));

        PCPU_SET(curthread, &thread0);

        /* Do basic tuning, hz etc */
        init_param1();

        cache_setup();

        /* Bootstrap enough of pmap to enter the kernel proper */
        pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
            KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

        devmap_bootstrap(0, NULL);

        valid = bus_probe();

        cninit();

        if (!valid)
                panic("Invalid bus configuration: %s",
                    kern_getenv("kern.cfg.order"));

        init_proc0(abp->kern_stack);
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();
        init_param2(physmem);

        dbg_monitor_init();
        kdb_init();

        early_boot = 0;
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg)  \
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

        PRINT_REG(actlr_el1);
        PRINT_REG(afsr0_el1);
        PRINT_REG(afsr1_el1);
        PRINT_REG(aidr_el1);
        PRINT_REG(amair_el1);
        PRINT_REG(ccsidr_el1);
        PRINT_REG(clidr_el1);
        PRINT_REG(contextidr_el1);
        PRINT_REG(cpacr_el1);
        PRINT_REG(csselr_el1);
        PRINT_REG(ctr_el0);
        PRINT_REG(currentel);
        PRINT_REG(daif);
        PRINT_REG(dczid_el0);
        PRINT_REG(elr_el1);
        PRINT_REG(esr_el1);
        PRINT_REG(far_el1);
#if 0
        /* ARM64TODO: Enable VFP before reading floating-point registers */
        PRINT_REG(fpcr);
        PRINT_REG(fpsr);
#endif
        PRINT_REG(id_aa64afr0_el1);
        PRINT_REG(id_aa64afr1_el1);
        PRINT_REG(id_aa64dfr0_el1);
        PRINT_REG(id_aa64dfr1_el1);
        PRINT_REG(id_aa64isar0_el1);
        PRINT_REG(id_aa64isar1_el1);
        PRINT_REG(id_aa64pfr0_el1);
        PRINT_REG(id_aa64pfr1_el1);
        PRINT_REG(id_afr0_el1);
        PRINT_REG(id_dfr0_el1);
        PRINT_REG(id_isar0_el1);
        PRINT_REG(id_isar1_el1);
        PRINT_REG(id_isar2_el1);
        PRINT_REG(id_isar3_el1);
        PRINT_REG(id_isar4_el1);
        PRINT_REG(id_isar5_el1);
        PRINT_REG(id_mmfr0_el1);
        PRINT_REG(id_mmfr1_el1);
        PRINT_REG(id_mmfr2_el1);
        PRINT_REG(id_mmfr3_el1);
#if 0
        /* Missing from llvm */
        PRINT_REG(id_mmfr4_el1);
#endif
        PRINT_REG(id_pfr0_el1);
        PRINT_REG(id_pfr1_el1);
        PRINT_REG(isr_el1);
        PRINT_REG(mair_el1);
        PRINT_REG(midr_el1);
        PRINT_REG(mpidr_el1);
        PRINT_REG(mvfr0_el1);
        PRINT_REG(mvfr1_el1);
        PRINT_REG(mvfr2_el1);
        PRINT_REG(revidr_el1);
        PRINT_REG(sctlr_el1);
        PRINT_REG(sp_el0);
        PRINT_REG(spsel);
        PRINT_REG(spsr_el1);
        PRINT_REG(tcr_el1);
        PRINT_REG(tpidr_el0);
        PRINT_REG(tpidr_el1);
        PRINT_REG(tpidrro_el0);
        PRINT_REG(ttbr0_el1);
        PRINT_REG(ttbr1_el1);
        PRINT_REG(vbar_el1);
#undef PRINT_REG
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
        uint64_t phys;

        if (have_addr) {
                phys = arm64_address_translate_s1e1r(addr);
                db_printf("Physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e1w(addr);
                db_printf("Physical address reg (write): 0x%016lx\n", phys);
        } else
                db_printf("show vtop <virt_addr>\n");
}
#endif