/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <machine/acpica_machdep.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;       /* The minimum D cache line size */
int64_t icache_line_size;       /* The minimum I cache line size */
int64_t idcache_line_size;      /* The minimum cache line size */
int64_t dczva_line_size;        /* The size of cache line the dc zva zeroes */
int has_pan;

/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl_phys;

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;

static void
pan_setup(void)
{
        uint64_t id_aa64mfr1;

        id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
        if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
                has_pan = 1;
}

void
pan_enable(void)
{

        /*
         * The LLVM integrated assembler doesn't understand the PAN
         * PSTATE field, so we have to assemble the instruction by hand
         * in an asm block. This is equivalent to:
         * msr pan, #1
         *
         * Setting the PAN bit stops the kernel from accessing memory
         * that userspace can also access, unless the kernel uses the
         * unprivileged (userspace) load/store instructions.
         */
        if (has_pan) {
                WRITE_SPECIALREG(sctlr_el1,
                    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
                __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
        }
}
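
/*
 * A sketch of how the hand-assembled instruction above is built, for anyone
 * decoding it against the ARMv8 ARM: 0xd500409f is the MSR (immediate) form
 * with op1 = 0b000, op2 = 0b100 (the PAN PSTATE field) and the CRm immediate
 * left as zero.  OR-ing in (0x1 << 8) puts the value 1 into CRm, giving
 * 0xd500419f, i.e. "msr pan, #1".
 */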

static void
cpu_startup(void *dummy)
{

        undef_init();
        identify_cpu();
        install_cpu_errata();

        vm_ksubmap_init(&kmi);
        bufinit();
        vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        regs->sp = frame->tf_sp;
        regs->lr = frame->tf_lr;
        regs->elr = frame->tf_elr;
        regs->spsr = frame->tf_spsr;

        memcpy(regs->x, frame->tf_x, sizeof(regs->x));

        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        frame->tf_sp = regs->sp;
        frame->tf_lr = regs->lr;
        frame->tf_elr = regs->elr;
        frame->tf_spsr &= ~PSR_FLAGS;
        frame->tf_spsr |= regs->spsr & PSR_FLAGS;

        memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

        return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                if (td == curthread)
                        vfp_save_state(td, pcb);

                KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
                    ("Called fill_fpregs while the kernel is using the VFP"));
                memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
                    sizeof(regs->fp_q));
                regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
                regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
        } else
#endif
                memset(regs->fp_q, 0, sizeof(regs->fp_q));
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
            ("Called set_fpregs while the kernel is using the VFP"));
        memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
        pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
        pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
        return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: fill_dbregs");
        return (EDOOFUS);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: set_dbregs");
        return (EDOOFUS);
}

#ifdef COMPAT_FREEBSD32
int
fill_regs32(struct thread *td, struct reg32 *regs)
{

        printf("ARM64TODO: fill_regs32");
        return (EDOOFUS);
}

int
set_regs32(struct thread *td, struct reg32 *regs)
{

        printf("ARM64TODO: set_regs32");
        return (EDOOFUS);
}

int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

        printf("ARM64TODO: fill_fpregs32");
        return (EDOOFUS);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

        printf("ARM64TODO: set_fpregs32");
        return (EDOOFUS);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

        printf("ARM64TODO: fill_dbregs32");
        return (EDOOFUS);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

        printf("ARM64TODO: set_dbregs32");
        return (EDOOFUS);
}
#endif

int
ptrace_set_pc(struct thread *td, u_long addr)
{

        printf("ARM64TODO: ptrace_set_pc");
        return (EDOOFUS);
}

int
ptrace_single_step(struct thread *td)
{

        td->td_frame->tf_spsr |= PSR_SS;
        td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

        td->td_frame->tf_spsr &= ~PSR_SS;
        td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
        return (0);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(struct trapframe));

        tf->tf_x[0] = stack;
        tf->tf_sp = STACKALIGN(stack);
        tf->tf_lr = imgp->entry_addr;
        tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size; they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;

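        /*
         * When GET_MC_CLEAR_RET is set (the getcontext()-style path), x0
         * and the carry flag are scrubbed below so that resuming this
         * context looks like a successful "return 0": x0 carries the
         * syscall return value and PSR_C is the flag the arm64 syscall
         * return path uses to signal an error.  See the callers of
         * get_mcontext() for the authoritative behaviour.
         */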
        if (clear_ret & GET_MC_CLEAR_RET) {
                mcp->mc_gpregs.gp_x[0] = 0;
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
        } else {
                mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
        }

        memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
            sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

        mcp->mc_gpregs.gp_sp = tf->tf_sp;
        mcp->mc_gpregs.gp_lr = tf->tf_lr;
        mcp->mc_gpregs.gp_elr = tf->tf_elr;

        return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        struct trapframe *tf = td->td_frame;
        uint32_t spsr;

        spsr = mcp->mc_gpregs.gp_spsr;
        if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
            (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
                return (EINVAL);

        memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

        tf->tf_sp = mcp->mc_gpregs.gp_sp;
        tf->tf_lr = mcp->mc_gpregs.gp_lr;
        tf->tf_elr = mcp->mc_gpregs.gp_elr;
        tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

        return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        curpcb = curthread->td_pcb;

        if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, curpcb);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called get_fpcontext while the kernel is using the VFP"));
                KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
                    ("Non-userspace FPU flags set in get_fpcontext"));
                memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
                    sizeof(mcp->mc_fpregs.fp_q));
                mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
                mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
                mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
                mcp->mc_flags |= _MC_FP_VALID;
        }

        critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
                curpcb = curthread->td_pcb;

                /*
                 * Discard any vfp state for the current thread; we
                 * are about to override it.
                 */
                vfp_discard(td);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called set_fpcontext while the kernel is using the VFP"));
                memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
                    sizeof(mcp->mc_fpregs.fp_q));
                curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
                curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
                curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
        }

        critical_exit();
#endif
}

void
cpu_idle(int busy)
{

        spinlock_enter();
        if (!busy)
                cpu_idleclock();
        if (!sched_runnable())
                __asm __volatile(
                    "dsb sy \n"
                    "wfi    \n");
        if (!busy)
                cpu_activeclock();
        spinlock_exit();
}
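
/*
 * Note on the idle sequence above: the "dsb sy" ahead of "wfi" is a full
 * data synchronization barrier, making sure outstanding memory accesses
 * have completed before the core drops into its low-power
 * wait-for-interrupt state.
 */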

void
cpu_halt(void)
{

        /* We should have shut down by now; if not, enter a low-power sleep. */
        intr_disable();
        while (1) {
                __asm __volatile("wfi");
        }
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        /* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
        struct pcpu *pc;

        pc = pcpu_find(cpu_id);
        if (pc == NULL || rate == NULL)
                return (EINVAL);

        if (pc->pc_clock == 0)
                return (EOPNOTSUPP);

        *rate = pc->pc_clock;
        return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                daif = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_daif = daif;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        critical_exit();
        daif = td->td_md.md_saved_daif;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(daif);
}
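
/*
 * The pair above nests: only the outermost spinlock_enter() disables
 * interrupts (saving DAIF) and only the matching outermost spinlock_exit()
 * restores them.  A sketch of the intended usage:
 *
 *      spinlock_enter();       interrupts masked, preemption disabled
 *      spinlock_enter();       nested call only bumps md_spinlock_count
 *      ...
 *      spinlock_exit();
 *      spinlock_exit();        saved DAIF restored here
 */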

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
        ucontext_t *sigcntxp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
        ucontext_t uc;
        int error;

        if (uap == NULL)
                return (EFAULT);
        if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
                return (EFAULT);

        error = set_mcontext(td, &uc.uc_mcontext);
        if (error != 0)
                return (error);
        set_fpcontext(td, &uc.uc_mcontext);

        /* Restore signal mask. */
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        int i;

        for (i = 0; i < PCB_LR; i++)
                pcb->pcb_x[i] = tf->tf_x[i];

        pcb->pcb_x[PCB_LR] = tf->tf_lr;
        pcb->pcb_pc = tf->tf_elr;
        pcb->pcb_sp = tf->tf_sp;
}

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        struct sysentvec *sysent;
        int onstack, sig;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);

        sig = ksi->ksi_signo;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);

        tf = td->td_frame;
        onstack = sigonstack(tf->tf_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else {
                fp = (struct sigframe *)td->td_frame->tf_sp;
        }

        /* Make room, keeping the stack aligned */
        fp--;
        fp = (struct sigframe *)STACKALIGN(fp);

        /* Fill in the frame to copy out */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
        get_fpcontext(td, &frame.sf_uc.uc_mcontext);
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
            ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        tf->tf_x[0] = sig;
        tf->tf_x[1] = (register_t)&fp->sf_si;
        tf->tf_x[2] = (register_t)&fp->sf_uc;

        tf->tf_elr = (register_t)catcher;
        tf->tf_sp = (register_t)fp;
        sysent = p->p_sysent;
        if (sysent->sv_sigcode_base != 0)
                tf->tf_lr = (register_t)sysent->sv_sigcode_base;
        else
                tf->tf_lr = (register_t)(sysent->sv_psstrings -
                    *(sysent->sv_szsigcode));

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
            tf->tf_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}
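
/*
 * The handler invocation set up above follows this file's sigframe layout:
 * x0 = signal number, x1 = &sf_si, x2 = &sf_uc, elr = the handler, sp = the
 * sigframe, and lr = the signal trampoline (the sigcode copied out by the
 * image activator), which is expected to end up back in sys_sigreturn()
 * once the handler returns.
 */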

static void
init_proc0(vm_offset_t kstack)
{
        struct pcpu *pcpup = &__pcpu[0];

        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
        thread0.td_pcb->pcb_fpflags = 0;
        thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
        thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;

        /* Set the base address of translation table 0. */
        thread0.td_proc->p_md.md_l0addr = READ_SPECIALREG(ttbr0_el1);
}

typedef struct {
        uint32_t type;
        uint64_t phys_start;
        uint64_t virt_start;
        uint64_t num_pages;
        uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        u_int i, insert_idx, _physmap_idx;

        _physmap_idx = *physmap_idxp;

        if (length == 0)
                return (1);

        /*
         * Find insertion point while checking for overlap.  Start off by
         * assuming the new entry will be added to the end.
         */
        insert_idx = _physmap_idx;
        for (i = 0; i <= _physmap_idx; i += 2) {
                if (base < physmap[i + 1]) {
                        if (base + length <= physmap[i]) {
                                insert_idx = i;
                                break;
                        }
                        if (boothowto & RB_VERBOSE)
                                printf(
                    "Overlapping memory regions, ignoring second region\n");
                        return (1);
                }
        }

        /* See if we can prepend to the next entry. */
        if (insert_idx <= _physmap_idx &&
            base + length == physmap[insert_idx]) {
                physmap[insert_idx] = base;
                return (1);
        }

        /* See if we can append to the previous entry. */
        if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
                physmap[insert_idx - 1] += length;
                return (1);
        }

        _physmap_idx += 2;
        *physmap_idxp = _physmap_idx;
        if (_physmap_idx == PHYSMAP_SIZE) {
                printf(
                "Too many segments in the physical address map, giving up\n");
                return (0);
        }

        /*
         * Move the last 'N' entries down to make room for the new
         * entry if needed.
         */
        for (i = _physmap_idx; i > insert_idx; i -= 2) {
                physmap[i] = physmap[i - 2];
                physmap[i + 1] = physmap[i - 1];
        }

        /* Insert the new entry. */
        physmap[insert_idx] = base;
        physmap[insert_idx + 1] = base + length;
        return (1);
}
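
/*
 * A worked example of the layout maintained above: physmap[] holds
 * [start, end) pairs in ascending order, two array slots per region, with
 * physmap_idx counting the slots in use.  Adding a 1 GiB bank at
 * 0x80000000 followed by a contiguous 1 GiB bank at 0xc0000000 leaves
 *
 *      physmap[0] = 0x80000000, physmap[1] = 0x100000000, physmap_idx = 2
 *
 * because the second add hits the "append to the previous entry" case and
 * the two banks coalesce into a single region.
 */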

#ifdef FDT
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{

        for (int i = 0; i < mrcnt; i++) {
                if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
                    physmap_idxp))
                        break;
        }
}
#endif

static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        struct efi_md *map, *p;
        const char *type;
        size_t efisz;
        int ndesc, i;

        static const char *types[] = {
                "Reserved",
                "LoaderCode",
                "LoaderData",
                "BootServicesCode",
                "BootServicesData",
                "RuntimeServicesCode",
                "RuntimeServicesData",
                "ConventionalMemory",
                "UnusableMemory",
                "ACPIReclaimMemory",
                "ACPIMemoryNVS",
                "MemoryMappedIO",
                "MemoryMappedIOPortSpace",
                "PalCode",
                "PersistentMemory"
        };

        /*
         * Memory map data provided by UEFI via the GetMemoryMap
         * Boot Services API.
         */
        efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
        map = (struct efi_md *)((uint8_t *)efihdr + efisz);

        if (efihdr->descriptor_size == 0)
                return;
        ndesc = efihdr->memory_size / efihdr->descriptor_size;

        if (boothowto & RB_VERBOSE)
                printf("%23s %12s %12s %8s %4s\n",
                    "Type", "Physical", "Virtual", "#Pages", "Attr");

        for (i = 0, p = map; i < ndesc; i++,
            p = efi_next_descriptor(p, efihdr->descriptor_size)) {
                if (boothowto & RB_VERBOSE) {
                        if (p->md_type < nitems(types))
                                type = types[p->md_type];
                        else
                                type = "<INVALID>";
                        printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                            p->md_virt, p->md_pages);
                        if (p->md_attr & EFI_MD_ATTR_UC)
                                printf("UC ");
                        if (p->md_attr & EFI_MD_ATTR_WC)
                                printf("WC ");
                        if (p->md_attr & EFI_MD_ATTR_WT)
                                printf("WT ");
                        if (p->md_attr & EFI_MD_ATTR_WB)
                                printf("WB ");
                        if (p->md_attr & EFI_MD_ATTR_UCE)
                                printf("UCE ");
                        if (p->md_attr & EFI_MD_ATTR_WP)
                                printf("WP ");
                        if (p->md_attr & EFI_MD_ATTR_RP)
                                printf("RP ");
                        if (p->md_attr & EFI_MD_ATTR_XP)
                                printf("XP ");
                        if (p->md_attr & EFI_MD_ATTR_NV)
                                printf("NV ");
                        if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
                                printf("MORE_RELIABLE ");
                        if (p->md_attr & EFI_MD_ATTR_RO)
                                printf("RO ");
                        if (p->md_attr & EFI_MD_ATTR_RT)
                                printf("RUNTIME");
                        printf("\n");
                }

                switch (p->md_type) {
                case EFI_MD_TYPE_CODE:
                case EFI_MD_TYPE_DATA:
                case EFI_MD_TYPE_BS_CODE:
                case EFI_MD_TYPE_BS_DATA:
                case EFI_MD_TYPE_FREE:
                        /*
                         * We're allowed to use any entry with these types.
                         */
                        break;
                default:
                        continue;
                }

                if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
                    physmap, physmap_idxp))
                        break;
        }
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
        vm_offset_t dtbp;

        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
        if (dtbp == (vm_offset_t)NULL) {
                printf("ERROR loading DTB\n");
                return;
        }

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");
}
#endif

static bool
bus_probe(void)
{
        bool has_acpi, has_fdt;
        char *order, *env;

        has_acpi = has_fdt = false;

#ifdef FDT
        has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
        has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
#endif

        env = kern_getenv("kern.cfg.order");
        if (env != NULL) {
                order = env;
                while (order != NULL) {
                        if (has_acpi &&
                            strncmp(order, "acpi", 4) == 0 &&
                            (order[4] == ',' || order[4] == '\0')) {
                                arm64_bus_method = ARM64_BUS_ACPI;
                                break;
                        }
                        if (has_fdt &&
                            strncmp(order, "fdt", 3) == 0 &&
                            (order[3] == ',' || order[3] == '\0')) {
                                arm64_bus_method = ARM64_BUS_FDT;
                                break;
                        }
                        /* Advance past the separator to the next token. */
                        order = strchr(order, ',');
                        if (order != NULL)
                                order++;
                }
                freeenv(env);

                /* If the bus method was set above, the order was valid. */
                if (arm64_bus_method != ARM64_BUS_NONE)
                        return (true);
        }
        /* If no order or an invalid order was set, use the default. */
        if (arm64_bus_method == ARM64_BUS_NONE) {
                if (has_fdt)
                        arm64_bus_method = ARM64_BUS_FDT;
                else if (has_acpi)
                        arm64_bus_method = ARM64_BUS_ACPI;
        }

        /*
         * If no option was set the default is valid. Otherwise we only set
         * a default here so that cninit() can work, and the caller then
         * panics to tell the user about the invalid bus setup.
         */
        return (env == NULL);
}
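
/*
 * Example of the tunable consumed above, as it might be set at the loader
 * prompt:
 *
 *      set kern.cfg.order=acpi,fdt
 *
 * i.e. a comma-separated preference list of "acpi" and "fdt".  The first
 * entry whose firmware tables are actually present wins; anything else
 * falls through to the FDT-then-ACPI default.
 */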

static void
cache_setup(void)
{
        int dcache_line_shift, icache_line_shift, dczva_line_shift;
        uint32_t ctr_el0;
        uint32_t dczid_el0;

        ctr_el0 = READ_SPECIALREG(ctr_el0);

        /* Read the log2 words in each D cache line */
        dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
        /* Get the D cache line size */
        dcache_line_size = sizeof(int) << dcache_line_shift;

        /* And the same for the I cache */
        icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
        icache_line_size = sizeof(int) << icache_line_shift;

        idcache_line_size = MIN(dcache_line_size, icache_line_size);

        dczid_el0 = READ_SPECIALREG(dczid_el0);

        /* Check if dc zva is not prohibited */
        if (dczid_el0 & DCZID_DZP)
                dczva_line_size = 0;
        else {
                /* Same as with above calculations */
                dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
                dczva_line_size = sizeof(int) << dczva_line_shift;

                /* Change pagezero function */
                pagezero = pagezero_cache;
        }
}
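
/*
 * Worked example of the arithmetic above: CTR_EL0.DminLine/IminLine and
 * DCZID_EL0.BS hold log2 of the size in 4-byte words, so a field value of
 * 4 gives sizeof(int) << 4 = 64 bytes, the common cache line and "dc zva"
 * block size on current arm64 parts.
 */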

void
initarm(struct arm64_bootparams *abp)
{
        struct efi_map_header *efihdr;
        struct pcpu *pcpup;
        char *env;
#ifdef FDT
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        int mem_regions_sz;
#endif
        vm_offset_t lastaddr;
        caddr_t kmdp;
        vm_paddr_t mem_len;
        bool valid;
        int i;

        /* Set the module data location */
        preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

        /* Find the kernel address */
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");

        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
        init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
        try_load_dtb(kmdp);
#endif

        efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

        /* Find the address to start allocating from */
        lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

        /* Load the physical memory ranges */
        physmap_idx = 0;
        efihdr = (struct efi_map_header *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL)
                add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
        else {
                /* Grab physical memory regions information from device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
                    NULL) != 0)
                        panic("Cannot get physical memory regions");
                add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
                    &physmap_idx);
        }
#endif

        /* Print the memory map */
        mem_len = 0;
        for (i = 0; i < physmap_idx; i += 2) {
                dump_avail[i] = physmap[i];
                dump_avail[i + 1] = physmap[i + 1];
                mem_len += physmap[i + 1] - physmap[i];
        }
        dump_avail[i] = 0;
        dump_avail[i + 1] = 0;

        /* Set the pcpu data, this is needed by pmap_bootstrap */
        pcpup = &__pcpu[0];
        pcpu_init(pcpup, 0, sizeof(struct pcpu));

        /*
         * Set the pcpu pointer with a backup in tpidr_el1 to be
         * loaded when entering the kernel from userland.
         */
        __asm __volatile(
            "mov x18, %0 \n"
            "msr tpidr_el1, %0" :: "r"(pcpup));

        PCPU_SET(curthread, &thread0);

        /* Do basic tuning, hz etc */
        init_param1();

        cache_setup();
        pan_setup();

        /* Bootstrap enough of pmap to enter the kernel proper */
        pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
            KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

        devmap_bootstrap(0, NULL);

        valid = bus_probe();

        cninit();

        if (!valid)
                panic("Invalid bus configuration: %s",
                    kern_getenv("kern.cfg.order"));

        init_proc0(abp->kern_stack);
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();
        init_param2(physmem);

        dbg_init();
        kdb_init();
        pan_enable();

        env = kern_getenv("kernelname");
        if (env != NULL)
                strlcpy(kernelname, env, sizeof(kernelname));

        early_boot = 0;
}

void
dbg_init(void)
{

        /* Clear OS lock */
        WRITE_SPECIALREG(OSLAR_EL1, 0);

        /* This permits DDB to use debug registers for watchpoints. */
        dbg_monitor_init();

        /* TODO: Eventually will need to initialize debug registers here. */
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg)  \
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

        PRINT_REG(actlr_el1);
        PRINT_REG(afsr0_el1);
        PRINT_REG(afsr1_el1);
        PRINT_REG(aidr_el1);
        PRINT_REG(amair_el1);
        PRINT_REG(ccsidr_el1);
        PRINT_REG(clidr_el1);
        PRINT_REG(contextidr_el1);
        PRINT_REG(cpacr_el1);
        PRINT_REG(csselr_el1);
        PRINT_REG(ctr_el0);
        PRINT_REG(currentel);
        PRINT_REG(daif);
        PRINT_REG(dczid_el0);
        PRINT_REG(elr_el1);
        PRINT_REG(esr_el1);
        PRINT_REG(far_el1);
#if 0
        /* ARM64TODO: Enable VFP before reading floating-point registers */
        PRINT_REG(fpcr);
        PRINT_REG(fpsr);
#endif
        PRINT_REG(id_aa64afr0_el1);
        PRINT_REG(id_aa64afr1_el1);
        PRINT_REG(id_aa64dfr0_el1);
        PRINT_REG(id_aa64dfr1_el1);
        PRINT_REG(id_aa64isar0_el1);
        PRINT_REG(id_aa64isar1_el1);
        PRINT_REG(id_aa64pfr0_el1);
        PRINT_REG(id_aa64pfr1_el1);
        PRINT_REG(id_afr0_el1);
        PRINT_REG(id_dfr0_el1);
        PRINT_REG(id_isar0_el1);
        PRINT_REG(id_isar1_el1);
        PRINT_REG(id_isar2_el1);
        PRINT_REG(id_isar3_el1);
        PRINT_REG(id_isar4_el1);
        PRINT_REG(id_isar5_el1);
        PRINT_REG(id_mmfr0_el1);
        PRINT_REG(id_mmfr1_el1);
        PRINT_REG(id_mmfr2_el1);
        PRINT_REG(id_mmfr3_el1);
#if 0
        /* Missing from llvm */
        PRINT_REG(id_mmfr4_el1);
#endif
        PRINT_REG(id_pfr0_el1);
        PRINT_REG(id_pfr1_el1);
        PRINT_REG(isr_el1);
        PRINT_REG(mair_el1);
        PRINT_REG(midr_el1);
        PRINT_REG(mpidr_el1);
        PRINT_REG(mvfr0_el1);
        PRINT_REG(mvfr1_el1);
        PRINT_REG(mvfr2_el1);
        PRINT_REG(revidr_el1);
        PRINT_REG(sctlr_el1);
        PRINT_REG(sp_el0);
        PRINT_REG(spsel);
        PRINT_REG(spsr_el1);
        PRINT_REG(tcr_el1);
        PRINT_REG(tpidr_el0);
        PRINT_REG(tpidr_el1);
        PRINT_REG(tpidrro_el0);
        PRINT_REG(ttbr0_el1);
        PRINT_REG(ttbr1_el1);
        PRINT_REG(vbar_el1);
#undef PRINT_REG
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
        uint64_t phys;

        if (have_addr) {
                phys = arm64_address_translate_s1e1r(addr);
                db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e1w(addr);
                db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0r(addr);
                db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0w(addr);
                db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
        } else
                db_printf("show vtop <virt_addr>\n");
}
#endif