/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;       /* The minimum D cache line size */
int64_t icache_line_size;       /* The minimum I cache line size */
int64_t idcache_line_size;      /* The minimum cache line size */
int64_t dczva_line_size;        /* The size of the cache line the dc zva instruction zeroes */

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is the default pagezero implementation */
void (*pagezero)(void *p) = pagezero_simple;

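/*
 * Finish bringing the system up: identify the CPU and initialize the
 * kernel submaps and the buffer system.  Run via the SYSINIT below.
 */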
static void
cpu_startup(void *dummy)
{

        identify_cpu();

        vm_ksubmap_init(&kmi);
        bufinit();
        vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

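/*
 * There is nothing machine-dependent we can do here to wake an idle
 * CPU, so report that and let the caller send an interrupt instead.
 */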
int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

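/* Copy the general purpose registers out of the thread's trapframe. */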
int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        regs->sp = frame->tf_sp;
        regs->lr = frame->tf_lr;
        regs->elr = frame->tf_elr;
        regs->spsr = frame->tf_spsr;

        memcpy(regs->x, frame->tf_x, sizeof(regs->x));

        return (0);
}

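/* Install new general purpose register values into the thread's trapframe. */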
int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        frame->tf_sp = regs->sp;
        frame->tf_lr = regs->lr;
        frame->tf_elr = regs->elr;
        frame->tf_spsr = regs->spsr;

        memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

        return (0);
}

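/*
 * Copy out the thread's VFP state.  If the thread has been using the
 * FP/SIMD unit the live register contents are saved to the PCB first;
 * otherwise the registers are reported as zero.
 */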
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, pcb);

                memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
                regs->fp_cr = pcb->pcb_fpcr;
                regs->fp_sr = pcb->pcb_fpsr;
        } else
#endif
                memset(regs->fp_q, 0, sizeof(regs->fp_q));
        return (0);
}

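/* Install new VFP register values into the thread's PCB. */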
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
        pcb->pcb_fpcr = regs->fp_cr;
        pcb->pcb_fpsr = regs->fp_sr;
#endif
        return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

        panic("ARM64TODO: fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

        panic("ARM64TODO: set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

        panic("ARM64TODO: ptrace_set_pc");
        return (0);
}

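/*
 * Single step support for ptrace(2): set the software step bit in the
 * thread's saved SPSR and note the fact in the PCB.
 */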
int
ptrace_single_step(struct thread *td)
{

        td->td_frame->tf_spsr |= PSR_SS;
        td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

        td->td_frame->tf_spsr &= ~PSR_SS;
        td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
        return (0);
}

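/*
 * Set up the register state for a thread that is about to begin
 * executing a new image.
 */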
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(struct trapframe));

        /*
         * We need to set x0 for init as it doesn't call
         * cpu_set_syscall_retval to copy the value. We also
         * need to set td_retval for the cases where we do.
         */
        tf->tf_x[0] = td->td_retval[0] = stack;
        tf->tf_sp = STACKALIGN(stack);
        tf->tf_lr = imgp->entry_addr;
        tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size; they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

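/*
 * Copy the general purpose register state into an mcontext.  When
 * GET_MC_CLEAR_RET is set, x0 is reported as zero and the carry flag
 * is cleared in the copied SPSR.
 */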
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;

        if (clear_ret & GET_MC_CLEAR_RET) {
                mcp->mc_gpregs.gp_x[0] = 0;
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
        } else {
                mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
        }

        memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
            sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

        mcp->mc_gpregs.gp_sp = tf->tf_sp;
        mcp->mc_gpregs.gp_lr = tf->tf_lr;
        mcp->mc_gpregs.gp_elr = tf->tf_elr;

        return (0);
}

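/* Restore the general purpose register state from an mcontext. */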
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        struct trapframe *tf = td->td_frame;

        memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

        tf->tf_sp = mcp->mc_gpregs.gp_sp;
        tf->tf_lr = mcp->mc_gpregs.gp_lr;
        tf->tf_elr = mcp->mc_gpregs.gp_elr;
        tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

        return (0);
}

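/*
 * Save the thread's VFP state, if it has used the FP/SIMD unit, into
 * the mcontext and mark the FP state as valid.
 */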
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        curpcb = curthread->td_pcb;

        if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, curpcb);

                memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
                    sizeof(mcp->mc_fpregs.fp_q));
                mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
                mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
                mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
                mcp->mc_flags |= _MC_FP_VALID;
        }

        critical_exit();
#endif
}

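/*
 * Restore the thread's VFP state from an mcontext, if the context
 * contains valid FP state.
 */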
static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
                curpcb = curthread->td_pcb;

                /*
                 * Discard any vfp state for the current thread; we
                 * are about to override it.
                 */
                vfp_discard(td);

                memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
                    sizeof(mcp->mc_fpregs.fp_q));
                curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
                curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
                curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
        }

        critical_exit();
#endif
}

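/*
 * Idle the CPU: wait for the next interrupt with a wfi, switching the
 * event timer in and out of idle mode around the wait when the CPU is
 * not busy.
 */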
void
cpu_idle(int busy)
{

        spinlock_enter();
        if (!busy)
                cpu_idleclock();
        if (!sched_runnable())
                __asm __volatile(
                    "dsb sy \n"
                    "wfi    \n");
        if (!busy)
                cpu_activeclock();
        spinlock_exit();
}

void
cpu_halt(void)
{

        /* We should have shut down by now; if not, enter a low power sleep */
        intr_disable();
        while (1) {
                __asm __volatile("wfi");
        }
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        /* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

        panic("ARM64TODO: cpu_est_clockrate");
}

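/*
 * Initialize the machine-dependent fields of a pcpu structure.  The
 * ACPI id is not known at this point, so start it off as invalid.
 */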
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_acpi_id = 0xffffffff;
}

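/*
 * Enter a spinlock section.  Interrupts are disabled on the first
 * (outermost) entry and the previous DAIF state is saved so that
 * spinlock_exit() can restore it when the count drops back to zero.
 */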
void
spinlock_enter(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                daif = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_daif = daif;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        critical_exit();
        daif = td->td_md.md_saved_daif;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(daif);
}

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
        ucontext_t *sigcntxp;
};
#endif

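/*
 * The sigreturn(2) system call.  Restore the context saved by sendsig()
 * after checking that the new SPSR leaves the thread in EL0 with the
 * DAIF exception bits clear, then restore the signal mask.
 */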
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
        ucontext_t uc;
        uint32_t spsr;

        if (uap == NULL)
                return (EFAULT);
        if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
                return (EFAULT);

        spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
        if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
            (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
                return (EINVAL);

        set_mcontext(td, &uc.uc_mcontext);
        set_fpcontext(td, &uc.uc_mcontext);

        /* Restore signal mask. */
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        int i;

        for (i = 0; i < PCB_LR; i++)
                pcb->pcb_x[i] = tf->tf_x[i];

        pcb->pcb_x[PCB_LR] = tf->tf_lr;
        pcb->pcb_pc = tf->tf_elr;
        pcb->pcb_sp = tf->tf_sp;
}

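/*
 * Deliver a signal.  Build a signal frame on the user stack (or on the
 * alternate signal stack when one is active) and adjust the thread's
 * register state so that it enters the handler with the link register
 * pointing at the signal trampoline.
 */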
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        struct sysentvec *sysent;
        int code, onstack, sig;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);

        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);

        tf = td->td_frame;
        onstack = sigonstack(tf->tf_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else {
                fp = (struct sigframe *)td->td_frame->tf_sp;
        }

        /* Make room, keeping the stack aligned */
        fp--;
        fp = (struct sigframe *)STACKALIGN(fp);

        /* Fill in the frame to copy out */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
        get_fpcontext(td, &frame.sf_uc.uc_mcontext);
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
            ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        tf->tf_x[0] = sig;
        tf->tf_x[1] = (register_t)&fp->sf_si;
        tf->tf_x[2] = (register_t)&fp->sf_uc;

        tf->tf_elr = (register_t)catcher;
        tf->tf_sp = (register_t)fp;
        sysent = p->p_sysent;
        if (sysent->sv_sigcode_base != 0)
                tf->tf_lr = (register_t)sysent->sv_sigcode_base;
        else
                tf->tf_lr = (register_t)(sysent->sv_psstrings -
                    *(sysent->sv_szsigcode));

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
            tf->tf_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

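/*
 * Link proc0 and thread0 together and hook thread0 up to its kernel
 * stack, PCB and the static proc0 trapframe.
 */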
static void
init_proc0(vm_offset_t kstack)
{
        struct pcpu *pcpup = &__pcpu[0];

        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
        thread0.td_pcb->pcb_fpflags = 0;
        thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
        uint32_t type;
        uint64_t phys_start;
        uint64_t virt_start;
        uint64_t num_pages;
        uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

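/*
 * Add a region of physical memory to the physmap array, keeping the
 * array sorted by address and merging the region with adjacent entries
 * where possible.  Overlapping regions are dropped.  Returns 0 once the
 * array is full, 1 otherwise.
 */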
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        u_int i, insert_idx, _physmap_idx;

        _physmap_idx = *physmap_idxp;

        if (length == 0)
                return (1);

        /*
         * Find insertion point while checking for overlap.  Start off by
         * assuming the new entry will be added to the end.
         */
        insert_idx = _physmap_idx;
        for (i = 0; i <= _physmap_idx; i += 2) {
                if (base < physmap[i + 1]) {
                        if (base + length <= physmap[i]) {
                                insert_idx = i;
                                break;
                        }
                        if (boothowto & RB_VERBOSE)
                                printf(
                    "Overlapping memory regions, ignoring second region\n");
                        return (1);
                }
        }

        /* See if we can prepend to the next entry. */
        if (insert_idx <= _physmap_idx &&
            base + length == physmap[insert_idx]) {
                physmap[insert_idx] = base;
                return (1);
        }

        /* See if we can append to the previous entry. */
        if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
                physmap[insert_idx - 1] += length;
                return (1);
        }

        _physmap_idx += 2;
        *physmap_idxp = _physmap_idx;
        if (_physmap_idx == PHYSMAP_SIZE) {
                printf(
                "Too many segments in the physical address map, giving up\n");
                return (0);
        }

        /*
         * Move the last 'N' entries down to make room for the new
         * entry if needed.
         */
        for (i = _physmap_idx; i > insert_idx; i -= 2) {
                physmap[i] = physmap[i - 2];
                physmap[i + 1] = physmap[i - 1];
        }

        /* Insert the new entry. */
        physmap[insert_idx] = base;
        physmap[insert_idx + 1] = base + length;
        return (1);
}

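/* Add the memory regions found in the FDT to the physical memory map. */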
#ifdef FDT
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{

        for (int i = 0; i < mrcnt; i++) {
                if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
                    physmap_idxp))
                        break;
        }
}
#endif

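/*
 * The firmware reports the size of each descriptor in the memory map;
 * it may be larger than sizeof(struct efi_md), so step through the map
 * using the reported size.
 */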
#define efi_next_descriptor(ptr, size) \
        ((struct efi_md *)(((uint8_t *) ptr) + size))

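/*
 * Walk the UEFI memory map supplied by the loader, printing it when
 * booting verbose, and add the usable entries to the physical memory
 * map.
 */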
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        struct efi_md *map, *p;
        const char *type;
        size_t efisz;
        int ndesc, i;

        static const char *types[] = {
                "Reserved",
                "LoaderCode",
                "LoaderData",
                "BootServicesCode",
                "BootServicesData",
                "RuntimeServicesCode",
                "RuntimeServicesData",
                "ConventionalMemory",
                "UnusableMemory",
                "ACPIReclaimMemory",
                "ACPIMemoryNVS",
                "MemoryMappedIO",
                "MemoryMappedIOPortSpace",
                "PalCode",
                "PersistentMemory"
        };

        /*
         * Memory map data provided by UEFI via the GetMemoryMap
         * Boot Services API.
         */
        efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
        map = (struct efi_md *)((uint8_t *)efihdr + efisz);

        if (efihdr->descriptor_size == 0)
                return;
        ndesc = efihdr->memory_size / efihdr->descriptor_size;

        if (boothowto & RB_VERBOSE)
                printf("%23s %12s %12s %8s %4s\n",
                    "Type", "Physical", "Virtual", "#Pages", "Attr");

        for (i = 0, p = map; i < ndesc; i++,
            p = efi_next_descriptor(p, efihdr->descriptor_size)) {
                if (boothowto & RB_VERBOSE) {
                        if (p->md_type < nitems(types))
                                type = types[p->md_type];
                        else
                                type = "<INVALID>";
                        printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                            p->md_virt, p->md_pages);
                        if (p->md_attr & EFI_MD_ATTR_UC)
                                printf("UC ");
                        if (p->md_attr & EFI_MD_ATTR_WC)
                                printf("WC ");
                        if (p->md_attr & EFI_MD_ATTR_WT)
                                printf("WT ");
                        if (p->md_attr & EFI_MD_ATTR_WB)
                                printf("WB ");
                        if (p->md_attr & EFI_MD_ATTR_UCE)
                                printf("UCE ");
                        if (p->md_attr & EFI_MD_ATTR_WP)
                                printf("WP ");
                        if (p->md_attr & EFI_MD_ATTR_RP)
                                printf("RP ");
                        if (p->md_attr & EFI_MD_ATTR_XP)
                                printf("XP ");
                        if (p->md_attr & EFI_MD_ATTR_NV)
                                printf("NV ");
                        if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
                                printf("MORE_RELIABLE ");
                        if (p->md_attr & EFI_MD_ATTR_RO)
                                printf("RO ");
                        if (p->md_attr & EFI_MD_ATTR_RT)
                                printf("RUNTIME");
                        printf("\n");
                }

                switch (p->md_type) {
                case EFI_MD_TYPE_CODE:
                case EFI_MD_TYPE_DATA:
                case EFI_MD_TYPE_BS_CODE:
                case EFI_MD_TYPE_BS_DATA:
                case EFI_MD_TYPE_FREE:
                        /*
                         * We're allowed to use any entry with these types.
                         */
                        break;
                default:
                        continue;
                }

                if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
                    physmap, physmap_idxp))
                        break;
        }
}

#ifdef FDT
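/*
 * Find the device tree blob passed in by the loader and use it to
 * initialize the OFW/FDT code.
 */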
static void
try_load_dtb(caddr_t kmdp)
{
        vm_offset_t dtbp;

        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
        if (dtbp == (vm_offset_t)NULL) {
                printf("ERROR loading DTB\n");
                return;
        }

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");
}
#endif

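/*
 * Read the cache line sizes from CTR_EL0 and the "dc zva" block size
 * from DCZID_EL0, and select the pagezero implementation to match.
 */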
static void
cache_setup(void)
{
        int dcache_line_shift, icache_line_shift, dczva_line_shift;
        uint32_t ctr_el0;
        uint32_t dczid_el0;

        ctr_el0 = READ_SPECIALREG(ctr_el0);

        /* Read the log2 of the number of words in each D cache line */
        dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
        /* Get the D cache line size */
        dcache_line_size = sizeof(int) << dcache_line_shift;

        /* And the same for the I cache */
        icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
        icache_line_size = sizeof(int) << icache_line_shift;

        idcache_line_size = MIN(dcache_line_size, icache_line_size);

        dczid_el0 = READ_SPECIALREG(dczid_el0);

        /* Check if dc zva is not prohibited */
        if (dczid_el0 & DCZID_DZP)
                dczva_line_size = 0;
        else {
                /* Same as with the above calculations */
                dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
                dczva_line_size = sizeof(int) << dczva_line_shift;

                /* Change the pagezero function */
                pagezero = pagezero_cache;
        }
}

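/*
 * Machine-dependent kernel initialization, called early in boot with
 * the parameters collected by the startup code.  Parse the loader
 * metadata, build the physical memory map, bootstrap pmap and the
 * console, and set up proc0.
 */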
void
initarm(struct arm64_bootparams *abp)
{
        struct efi_map_header *efihdr;
        struct pcpu *pcpup;
#ifdef FDT
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        int mem_regions_sz;
#endif
        vm_offset_t lastaddr;
        caddr_t kmdp;
        vm_paddr_t mem_len;
        int i;

        /* Set the module data location */
        preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

        /* Find the kernel address */
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");

        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
        init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
        try_load_dtb(kmdp);
#endif

        /* Find the address to start allocating from */
        lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

        /* Load the physical memory ranges */
        physmap_idx = 0;
        efihdr = (struct efi_map_header *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL)
                add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
        else {
                /* Grab physical memory region information from the device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
                    NULL) != 0)
                        panic("Cannot get physical memory regions");
                add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
                    &physmap_idx);
        }
#endif

        /* Build the dump_avail array from the physical memory map */
        mem_len = 0;
        for (i = 0; i < physmap_idx; i += 2) {
                dump_avail[i] = physmap[i];
                dump_avail[i + 1] = physmap[i + 1];
                mem_len += physmap[i + 1] - physmap[i];
        }
        dump_avail[i] = 0;
        dump_avail[i + 1] = 0;

        /* Set the pcpu data; this is needed by pmap_bootstrap */
        pcpup = &__pcpu[0];
        pcpu_init(pcpup, 0, sizeof(struct pcpu));

        /*
         * Set the pcpu pointer with a backup in tpidr_el1 to be
         * loaded when entering the kernel from userland.
         */
        __asm __volatile(
            "mov x18, %0 \n"
            "msr tpidr_el1, %0" :: "r"(pcpup));

        PCPU_SET(curthread, &thread0);

        /* Do basic tuning, hz etc */
        init_param1();

        cache_setup();

        /* Bootstrap enough of pmap to enter the kernel proper */
        pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
            KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

        devmap_bootstrap(0, NULL);

        cninit();

        init_proc0(abp->kern_stack);
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();
        init_param2(physmem);

        dbg_monitor_init();
        kdb_init();

        early_boot = 0;
}

uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *,
    struct timecounter *);

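/*
 * Fill in the vdso timehands.  The work is delegated to the
 * timecounter driver through arm_cpu_fill_vdso_timehands, if a driver
 * has registered one.
 */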
uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{

        return (arm_cpu_fill_vdso_timehands != NULL ?
            arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0);
}

#ifdef DDB
#include <ddb/ddb.h>

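/* DDB command to dump a selection of the system registers. */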
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg)  \
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

        PRINT_REG(actlr_el1);
        PRINT_REG(afsr0_el1);
        PRINT_REG(afsr1_el1);
        PRINT_REG(aidr_el1);
        PRINT_REG(amair_el1);
        PRINT_REG(ccsidr_el1);
        PRINT_REG(clidr_el1);
        PRINT_REG(contextidr_el1);
        PRINT_REG(cpacr_el1);
        PRINT_REG(csselr_el1);
        PRINT_REG(ctr_el0);
        PRINT_REG(currentel);
        PRINT_REG(daif);
        PRINT_REG(dczid_el0);
        PRINT_REG(elr_el1);
        PRINT_REG(esr_el1);
        PRINT_REG(far_el1);
#if 0
        /* ARM64TODO: Enable VFP before reading floating-point registers */
        PRINT_REG(fpcr);
        PRINT_REG(fpsr);
#endif
        PRINT_REG(id_aa64afr0_el1);
        PRINT_REG(id_aa64afr1_el1);
        PRINT_REG(id_aa64dfr0_el1);
        PRINT_REG(id_aa64dfr1_el1);
        PRINT_REG(id_aa64isar0_el1);
        PRINT_REG(id_aa64isar1_el1);
        PRINT_REG(id_aa64pfr0_el1);
        PRINT_REG(id_aa64pfr1_el1);
        PRINT_REG(id_afr0_el1);
        PRINT_REG(id_dfr0_el1);
        PRINT_REG(id_isar0_el1);
        PRINT_REG(id_isar1_el1);
        PRINT_REG(id_isar2_el1);
        PRINT_REG(id_isar3_el1);
        PRINT_REG(id_isar4_el1);
        PRINT_REG(id_isar5_el1);
        PRINT_REG(id_mmfr0_el1);
        PRINT_REG(id_mmfr1_el1);
        PRINT_REG(id_mmfr2_el1);
        PRINT_REG(id_mmfr3_el1);
#if 0
        /* Missing from llvm */
        PRINT_REG(id_mmfr4_el1);
#endif
        PRINT_REG(id_pfr0_el1);
        PRINT_REG(id_pfr1_el1);
        PRINT_REG(isr_el1);
        PRINT_REG(mair_el1);
        PRINT_REG(midr_el1);
        PRINT_REG(mpidr_el1);
        PRINT_REG(mvfr0_el1);
        PRINT_REG(mvfr1_el1);
        PRINT_REG(mvfr2_el1);
        PRINT_REG(revidr_el1);
        PRINT_REG(sctlr_el1);
        PRINT_REG(sp_el0);
        PRINT_REG(spsel);
        PRINT_REG(spsr_el1);
        PRINT_REG(tcr_el1);
        PRINT_REG(tpidr_el0);
        PRINT_REG(tpidr_el1);
        PRINT_REG(tpidrro_el0);
        PRINT_REG(ttbr0_el1);
        PRINT_REG(ttbr1_el1);
        PRINT_REG(vbar_el1);
#undef PRINT_REG
}

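/*
 * DDB command to translate a virtual address with a stage 1 EL1 read
 * translation and print the resulting PAR_EL1 value.
 */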
DB_SHOW_COMMAND(vtop, db_show_vtop)
{
        uint64_t phys;

        if (have_addr) {
                phys = arm64_address_translate_s1e1r(addr);
                db_printf("Physical address reg: 0x%016lx\n", phys);
        } else
                db_printf("show vtop <virt_addr>\n");
}
#endif