/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_compat.h"
#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <machine/acpica_machdep.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;       /* The minimum D cache line size */
int64_t icache_line_size;       /* The minimum I cache line size */
int64_t idcache_line_size;      /* The minimum cache line size */
int64_t dczva_line_size;        /* The size of cache line the dc zva zeroes */
int has_pan;

/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl_phys;

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;

static void
pan_setup(void)
{
        uint64_t id_aa64mmfr1;

        id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
        if (ID_AA64MMFR1_PAN(id_aa64mmfr1) != ID_AA64MMFR1_PAN_NONE)
                has_pan = 1;
}

void
pan_enable(void)
{

        /*
         * The LLVM integrated assembler doesn't understand the PAN
         * PSTATE field. Because of this we need to manually create
         * the instruction in an asm block. This is equivalent to:
         * msr pan, #1
         *
         * This sets the PAN bit, stopping the kernel from accessing
         * memory when userspace can also access it unless the kernel
         * uses the userspace load/store instructions.
         */
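        /*
         * A note on the encoding used below: 0xd500409f is the MSR
         * (immediate) instruction with op1 = 0 and op2 = 4, which selects
         * the PAN PSTATE field; the 4-bit immediate is carried in the CRm
         * field (bits 11:8), so OR-ing in (0x1 << 8) encodes "msr pan, #1".
         */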
        if (has_pan) {
                WRITE_SPECIALREG(sctlr_el1,
                    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
                __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
        }
}

static void
cpu_startup(void *dummy)
{

        undef_init();
        identify_cpu();

        vm_ksubmap_init(&kmi);
        bufinit();
        vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        regs->sp = frame->tf_sp;
        regs->lr = frame->tf_lr;
        regs->elr = frame->tf_elr;
        regs->spsr = frame->tf_spsr;

        memcpy(regs->x, frame->tf_x, sizeof(regs->x));

        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        frame->tf_sp = regs->sp;
        frame->tf_lr = regs->lr;
        frame->tf_elr = regs->elr;
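        /* Only the NZCV condition flags of SPSR may be changed this way. */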
        frame->tf_spsr &= ~PSR_FLAGS;
        frame->tf_spsr |= regs->spsr & PSR_FLAGS;

        memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

        return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                if (td == curthread)
                        vfp_save_state(td, pcb);

                KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
                    ("Called fill_fpregs while the kernel is using the VFP"));
                memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
                    sizeof(regs->fp_q));
                regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
                regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
        } else
#endif
                memset(regs->fp_q, 0, sizeof(regs->fp_q));
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
            ("Called set_fpregs while the kernel is using the VFP"));
        memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
        pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
        pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
        return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: fill_dbregs");
        return (EDOOFUS);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: set_dbregs");
        return (EDOOFUS);
}

#ifdef COMPAT_FREEBSD32
int
fill_regs32(struct thread *td, struct reg32 *regs)
{

        printf("ARM64TODO: fill_regs32");
        return (EDOOFUS);
}

int
set_regs32(struct thread *td, struct reg32 *regs)
{

        printf("ARM64TODO: set_regs32");
        return (EDOOFUS);
}

int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

        printf("ARM64TODO: fill_fpregs32");
        return (EDOOFUS);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

        printf("ARM64TODO: set_fpregs32");
        return (EDOOFUS);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

        printf("ARM64TODO: fill_dbregs32");
        return (EDOOFUS);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

        printf("ARM64TODO: set_dbregs32");
        return (EDOOFUS);
}
#endif

int
ptrace_set_pc(struct thread *td, u_long addr)
{

        printf("ARM64TODO: ptrace_set_pc");
        return (EDOOFUS);
}

int
ptrace_single_step(struct thread *td)
{

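        /* Arm the hardware single-step machinery via the SS bit in SPSR. */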
        td->td_frame->tf_spsr |= PSR_SS;
        td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

        td->td_frame->tf_spsr &= ~PSR_SS;
        td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
        return (0);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(struct trapframe));

        tf->tf_x[0] = stack;
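        /* AArch64 requires the initial stack pointer to be 16-byte aligned. */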
        tf->tf_sp = STACKALIGN(stack);
        tf->tf_lr = imgp->entry_addr;
        tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;

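        /*
         * When asked to clear the syscall return value, zero x0 and clear
         * the carry flag (the syscall error indicator) so the saved context
         * reads as a successful return of 0.
         */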
        if (clear_ret & GET_MC_CLEAR_RET) {
                mcp->mc_gpregs.gp_x[0] = 0;
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
        } else {
                mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
        }

        memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
            sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

        mcp->mc_gpregs.gp_sp = tf->tf_sp;
        mcp->mc_gpregs.gp_lr = tf->tf_lr;
        mcp->mc_gpregs.gp_elr = tf->tf_elr;

        return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        struct trapframe *tf = td->td_frame;
        uint32_t spsr;

        spsr = mcp->mc_gpregs.gp_spsr;
        if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
            (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
                return (EINVAL);

        memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

        tf->tf_sp = mcp->mc_gpregs.gp_sp;
        tf->tf_lr = mcp->mc_gpregs.gp_lr;
        tf->tf_elr = mcp->mc_gpregs.gp_elr;
        tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

        return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        curpcb = curthread->td_pcb;

        if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, curpcb);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called get_fpcontext while the kernel is using the VFP"));
                KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
                    ("Non-userspace FPU flags set in get_fpcontext"));
                memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
                    sizeof(mcp->mc_fpregs.fp_q));
                mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
                mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
                mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
                mcp->mc_flags |= _MC_FP_VALID;
        }

        critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
                curpcb = curthread->td_pcb;

                /*
                 * Discard any vfp state for the current thread, we
                 * are about to override it.
                 */
                vfp_discard(td);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called set_fpcontext while the kernel is using the VFP"));
                memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
                    sizeof(mcp->mc_fpregs.fp_q));
                curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
                curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
                curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
        }

        critical_exit();
#endif
}

void
cpu_idle(int busy)
{

        spinlock_enter();
        if (!busy)
                cpu_idleclock();
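        /*
         * Wait for an interrupt; the DSB ensures any outstanding memory
         * accesses have completed before the core idles in WFI.
         */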
        if (!sched_runnable())
                __asm __volatile(
                    "dsb sy \n"
                    "wfi    \n");
        if (!busy)
                cpu_activeclock();
        spinlock_exit();
}

void
cpu_halt(void)
{

        /* We should have shut down by now; if not, enter a low-power sleep */
        intr_disable();
        while (1) {
                __asm __volatile("wfi");
        }
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        /* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
        struct pcpu *pc;

        pc = pcpu_find(cpu_id);
        if (pc == NULL || rate == NULL)
                return (EINVAL);

        if (pc->pc_clock == 0)
                return (EOPNOTSUPP);

        *rate = pc->pc_clock;
        return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                daif = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_daif = daif;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        critical_exit();
        daif = td->td_md.md_saved_daif;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(daif);
}

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
        ucontext_t *ucp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
        ucontext_t uc;
        int error;

        if (uap == NULL)
                return (EFAULT);
        if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
                return (EFAULT);

        error = set_mcontext(td, &uc.uc_mcontext);
        if (error != 0)
                return (error);
        set_fpcontext(td, &uc.uc_mcontext);

        /* Restore signal mask. */
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        int i;

        for (i = 0; i < PCB_LR; i++)
                pcb->pcb_x[i] = tf->tf_x[i];

        pcb->pcb_x[PCB_LR] = tf->tf_lr;
        pcb->pcb_pc = tf->tf_elr;
        pcb->pcb_sp = tf->tf_sp;
}

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        struct sysentvec *sysent;
        int onstack, sig;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);

        sig = ksi->ksi_signo;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);

        tf = td->td_frame;
        onstack = sigonstack(tf->tf_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else {
                fp = (struct sigframe *)td->td_frame->tf_sp;
        }

        /* Make room, keeping the stack aligned */
        fp--;
        fp = (struct sigframe *)STACKALIGN(fp);

        /* Fill in the frame to copy out */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
        get_fpcontext(td, &frame.sf_uc.uc_mcontext);
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
            ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        tf->tf_x[0] = sig;
        tf->tf_x[1] = (register_t)&fp->sf_si;
        tf->tf_x[2] = (register_t)&fp->sf_uc;

        tf->tf_elr = (register_t)catcher;
        tf->tf_sp = (register_t)fp;
        sysent = p->p_sysent;
        if (sysent->sv_sigcode_base != 0)
                tf->tf_lr = (register_t)sysent->sv_sigcode_base;
        else
                tf->tf_lr = (register_t)(sysent->sv_psstrings -
                    *(sysent->sv_szsigcode));

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
            tf->tf_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

static void
init_proc0(vm_offset_t kstack)
{
        struct pcpu *pcpup = &__pcpu[0];

        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
        thread0.td_pcb->pcb_fpflags = 0;
        thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
        thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
        uint32_t type;
        uint64_t phys_start;
        uint64_t virt_start;
        uint64_t num_pages;
        uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

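/*
 * physmap holds the usable physical memory as pairs of addresses: each even
 * slot is the start of a region and the following odd slot is its end
 * (base + length), with physmap_idx counting the slots currently in use.
 */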
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        u_int i, insert_idx, _physmap_idx;

        _physmap_idx = *physmap_idxp;

        if (length == 0)
                return (1);

        /*
         * Find insertion point while checking for overlap.  Start off by
         * assuming the new entry will be added to the end.
         */
        insert_idx = _physmap_idx;
        for (i = 0; i <= _physmap_idx; i += 2) {
                if (base < physmap[i + 1]) {
                        if (base + length <= physmap[i]) {
                                insert_idx = i;
                                break;
                        }
                        if (boothowto & RB_VERBOSE)
                                printf(
                    "Overlapping memory regions, ignoring second region\n");
                        return (1);
                }
        }

        /* See if we can prepend to the next entry. */
        if (insert_idx <= _physmap_idx &&
            base + length == physmap[insert_idx]) {
                physmap[insert_idx] = base;
                return (1);
        }

        /* See if we can append to the previous entry. */
        if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
                physmap[insert_idx - 1] += length;
                return (1);
        }

        _physmap_idx += 2;
        *physmap_idxp = _physmap_idx;
        if (_physmap_idx == PHYSMAP_SIZE) {
                printf(
                "Too many segments in the physical address map, giving up\n");
                return (0);
        }

        /*
         * Move the last 'N' entries down to make room for the new
         * entry if needed.
         */
        for (i = _physmap_idx; i > insert_idx; i -= 2) {
                physmap[i] = physmap[i - 2];
                physmap[i + 1] = physmap[i - 1];
        }

        /* Insert the new entry. */
        physmap[insert_idx] = base;
        physmap[insert_idx + 1] = base + length;
        return (1);
}

#ifdef FDT
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{

        for (int i = 0; i < mrcnt; i++) {
                if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
                    physmap_idxp))
                        break;
        }
}
#endif

static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        struct efi_md *map, *p;
        const char *type;
        size_t efisz;
        int ndesc, i;

        static const char *types[] = {
                "Reserved",
                "LoaderCode",
                "LoaderData",
                "BootServicesCode",
                "BootServicesData",
                "RuntimeServicesCode",
                "RuntimeServicesData",
                "ConventionalMemory",
                "UnusableMemory",
                "ACPIReclaimMemory",
                "ACPIMemoryNVS",
                "MemoryMappedIO",
                "MemoryMappedIOPortSpace",
                "PalCode",
                "PersistentMemory"
        };

        /*
         * Memory map data provided by UEFI via the GetMemoryMap
         * Boot Services API.
         */
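        /*
         * Round the header size up to 16 bytes so the descriptor array that
         * follows it starts on an aligned boundary.
         */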
        efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
        map = (struct efi_md *)((uint8_t *)efihdr + efisz);

        if (efihdr->descriptor_size == 0)
                return;
        ndesc = efihdr->memory_size / efihdr->descriptor_size;

        if (boothowto & RB_VERBOSE)
                printf("%23s %12s %12s %8s %4s\n",
                    "Type", "Physical", "Virtual", "#Pages", "Attr");

        for (i = 0, p = map; i < ndesc; i++,
            p = efi_next_descriptor(p, efihdr->descriptor_size)) {
                if (boothowto & RB_VERBOSE) {
                        if (p->md_type < nitems(types))
                                type = types[p->md_type];
                        else
                                type = "<INVALID>";
                        printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                            p->md_virt, p->md_pages);
                        if (p->md_attr & EFI_MD_ATTR_UC)
                                printf("UC ");
                        if (p->md_attr & EFI_MD_ATTR_WC)
                                printf("WC ");
                        if (p->md_attr & EFI_MD_ATTR_WT)
                                printf("WT ");
                        if (p->md_attr & EFI_MD_ATTR_WB)
                                printf("WB ");
                        if (p->md_attr & EFI_MD_ATTR_UCE)
                                printf("UCE ");
                        if (p->md_attr & EFI_MD_ATTR_WP)
                                printf("WP ");
                        if (p->md_attr & EFI_MD_ATTR_RP)
                                printf("RP ");
                        if (p->md_attr & EFI_MD_ATTR_XP)
                                printf("XP ");
                        if (p->md_attr & EFI_MD_ATTR_NV)
                                printf("NV ");
                        if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
                                printf("MORE_RELIABLE ");
                        if (p->md_attr & EFI_MD_ATTR_RO)
                                printf("RO ");
                        if (p->md_attr & EFI_MD_ATTR_RT)
                                printf("RUNTIME");
                        printf("\n");
                }

                switch (p->md_type) {
                case EFI_MD_TYPE_CODE:
                case EFI_MD_TYPE_DATA:
                case EFI_MD_TYPE_BS_CODE:
                case EFI_MD_TYPE_BS_DATA:
                case EFI_MD_TYPE_FREE:
                        /*
                         * We're allowed to use any entry with these types.
                         */
                        break;
                default:
                        continue;
                }

                if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
                    physmap, physmap_idxp))
                        break;
        }
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
        vm_offset_t dtbp;

        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
        if (dtbp == (vm_offset_t)NULL) {
                printf("ERROR loading DTB\n");
                return;
        }

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");
}
#endif

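/*
 * Pick the bus enumeration method. The kern.cfg.order loader tunable is a
 * comma-separated preference list, e.g. kern.cfg.order="acpi,fdt" in
 * loader.conf prefers ACPI when its tables are present.
 */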
static bool
bus_probe(void)
{
        bool has_acpi, has_fdt;
        char *order, *env;

        has_acpi = has_fdt = false;

#ifdef FDT
        has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
        has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
#endif

        env = kern_getenv("kern.cfg.order");
        if (env != NULL) {
                order = env;
                while (order != NULL) {
                        if (has_acpi &&
                            strncmp(order, "acpi", 4) == 0 &&
                            (order[4] == ',' || order[4] == '\0')) {
                                arm64_bus_method = ARM64_BUS_ACPI;
                                break;
                        }
                        if (has_fdt &&
                            strncmp(order, "fdt", 3) == 0 &&
                            (order[3] == ',' || order[3] == '\0')) {
                                arm64_bus_method = ARM64_BUS_FDT;
                                break;
                        }
                        order = strchr(order, ',');
                        if (order != NULL)
                                order++;
                }
                freeenv(env);

                /* If we set the bus method it is valid */
                if (arm64_bus_method != ARM64_BUS_NONE)
                        return (true);
        }
        /* If no order or an invalid order was set use the default */
        if (arm64_bus_method == ARM64_BUS_NONE) {
                if (has_fdt)
                        arm64_bus_method = ARM64_BUS_FDT;
                else if (has_acpi)
                        arm64_bus_method = ARM64_BUS_ACPI;
        }

        /*
         * If no order was given in the environment the default chosen above
         * is valid. Otherwise we still picked a method so that cninit() can
         * work, and the caller will panic to report the invalid bus setup.
         */
        return (env == NULL);
}

static void
cache_setup(void)
{
        int dcache_line_shift, icache_line_shift, dczva_line_shift;
        uint32_t ctr_el0;
        uint32_t dczid_el0;

        ctr_el0 = READ_SPECIALREG(ctr_el0);

        /* Read the log2 words in each D cache line */
        dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
        /* Get the D cache line size */
        dcache_line_size = sizeof(int) << dcache_line_shift;
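        /* e.g. a DminLine value of 4 gives sizeof(int) << 4 = 64-byte lines. */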

        /* And the same for the I cache */
        icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
        icache_line_size = sizeof(int) << icache_line_shift;

        idcache_line_size = MIN(dcache_line_size, icache_line_size);

        dczid_el0 = READ_SPECIALREG(dczid_el0);

        /* Check if dc zva is not prohibited */
        if (dczid_el0 & DCZID_DZP)
                dczva_line_size = 0;
        else {
                /* Same as with above calculations */
                dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
                dczva_line_size = sizeof(int) << dczva_line_shift;
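                /* e.g. a BS field of 4 yields a 64-byte block, the typical case. */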

                /* Change pagezero function */
                pagezero = pagezero_cache;
        }
}

void
initarm(struct arm64_bootparams *abp)
{
        struct efi_map_header *efihdr;
        struct pcpu *pcpup;
        char *env;
#ifdef FDT
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        int mem_regions_sz;
#endif
        vm_offset_t lastaddr;
        caddr_t kmdp;
        vm_paddr_t mem_len;
        bool valid;
        int i;

        /* Set the module data location */
        preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

        /* Find the kernel address */
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");

        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
        init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
        try_load_dtb(kmdp);
#endif

        efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

        /* Find the address to start allocating from */
        lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

        /* Load the physical memory ranges */
        physmap_idx = 0;
        efihdr = (struct efi_map_header *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL)
                add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
        else {
                /* Grab physical memory regions information from device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
                    NULL) != 0)
                        panic("Cannot get physical memory regions");
                add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
                    &physmap_idx);
        }
#endif

        /* Print the memory map */
        mem_len = 0;
        for (i = 0; i < physmap_idx; i += 2) {
                dump_avail[i] = physmap[i];
                dump_avail[i + 1] = physmap[i + 1];
                mem_len += physmap[i + 1] - physmap[i];
        }
        dump_avail[i] = 0;
        dump_avail[i + 1] = 0;

        /* Set the pcpu data, this is needed by pmap_bootstrap */
        pcpup = &__pcpu[0];
        pcpu_init(pcpup, 0, sizeof(struct pcpu));

        /*
         * Set the pcpu pointer with a backup in tpidr_el1 to be
         * loaded when entering the kernel from userland.
         */
        __asm __volatile(
            "mov x18, %0 \n"
            "msr tpidr_el1, %0" :: "r"(pcpup));

        PCPU_SET(curthread, &thread0);

        /* Do basic tuning, hz etc */
        init_param1();

        cache_setup();
        pan_setup();

        /* Bootstrap enough of pmap to enter the kernel proper */
        pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
            KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

        devmap_bootstrap(0, NULL);

        valid = bus_probe();

        cninit();

        if (!valid)
                panic("Invalid bus configuration: %s",
                    kern_getenv("kern.cfg.order"));

        init_proc0(abp->kern_stack);
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();
        init_param2(physmem);

        dbg_init();
        kdb_init();
        pan_enable();

        env = kern_getenv("kernelname");
        if (env != NULL)
                strlcpy(kernelname, env, sizeof(kernelname));

        early_boot = 0;
}

void
dbg_init(void)
{

        /* Clear OS lock */
        WRITE_SPECIALREG(OSLAR_EL1, 0);

        /* This permits DDB to use debug registers for watchpoints. */
        dbg_monitor_init();

        /* TODO: Eventually will need to initialize debug registers here. */
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg)  \
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

        PRINT_REG(actlr_el1);
        PRINT_REG(afsr0_el1);
        PRINT_REG(afsr1_el1);
        PRINT_REG(aidr_el1);
        PRINT_REG(amair_el1);
        PRINT_REG(ccsidr_el1);
        PRINT_REG(clidr_el1);
        PRINT_REG(contextidr_el1);
        PRINT_REG(cpacr_el1);
        PRINT_REG(csselr_el1);
        PRINT_REG(ctr_el0);
        PRINT_REG(currentel);
        PRINT_REG(daif);
        PRINT_REG(dczid_el0);
        PRINT_REG(elr_el1);
        PRINT_REG(esr_el1);
        PRINT_REG(far_el1);
#if 0
        /* ARM64TODO: Enable VFP before reading floating-point registers */
        PRINT_REG(fpcr);
        PRINT_REG(fpsr);
#endif
        PRINT_REG(id_aa64afr0_el1);
        PRINT_REG(id_aa64afr1_el1);
        PRINT_REG(id_aa64dfr0_el1);
        PRINT_REG(id_aa64dfr1_el1);
        PRINT_REG(id_aa64isar0_el1);
        PRINT_REG(id_aa64isar1_el1);
        PRINT_REG(id_aa64pfr0_el1);
        PRINT_REG(id_aa64pfr1_el1);
        PRINT_REG(id_afr0_el1);
        PRINT_REG(id_dfr0_el1);
        PRINT_REG(id_isar0_el1);
        PRINT_REG(id_isar1_el1);
        PRINT_REG(id_isar2_el1);
        PRINT_REG(id_isar3_el1);
        PRINT_REG(id_isar4_el1);
        PRINT_REG(id_isar5_el1);
        PRINT_REG(id_mmfr0_el1);
        PRINT_REG(id_mmfr1_el1);
        PRINT_REG(id_mmfr2_el1);
        PRINT_REG(id_mmfr3_el1);
#if 0
        /* Missing from llvm */
        PRINT_REG(id_mmfr4_el1);
#endif
        PRINT_REG(id_pfr0_el1);
        PRINT_REG(id_pfr1_el1);
        PRINT_REG(isr_el1);
        PRINT_REG(mair_el1);
        PRINT_REG(midr_el1);
        PRINT_REG(mpidr_el1);
        PRINT_REG(mvfr0_el1);
        PRINT_REG(mvfr1_el1);
        PRINT_REG(mvfr2_el1);
        PRINT_REG(revidr_el1);
        PRINT_REG(sctlr_el1);
        PRINT_REG(sp_el0);
        PRINT_REG(spsel);
        PRINT_REG(spsr_el1);
        PRINT_REG(tcr_el1);
        PRINT_REG(tpidr_el0);
        PRINT_REG(tpidr_el1);
        PRINT_REG(tpidrro_el0);
        PRINT_REG(ttbr0_el1);
        PRINT_REG(ttbr1_el1);
        PRINT_REG(vbar_el1);
#undef PRINT_REG
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
        uint64_t phys;

        if (have_addr) {
                phys = arm64_address_translate_s1e1r(addr);
                db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e1w(addr);
                db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0r(addr);
                db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0w(addr);
                db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
        } else
                db_printf("show vtop <virt_addr>\n");
}
#endif