/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_acpi.h"
#include "opt_compat.h"
#include "opt_platform.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vdso.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/kdb.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/undefined.h>
#include <machine/vmparam.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <machine/acpica_machdep.h>
#endif

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;       /* The minimum D cache line size */
int64_t icache_line_size;       /* The minimum I cache line size */
int64_t idcache_line_size;      /* The minimum cache line size */
int64_t dczva_line_size;        /* The size of cache line the dc zva zeroes */
int has_pan;

/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl_phys;

/* pagezero_* implementations are provided in support.S */
void pagezero_simple(void *);
void pagezero_cache(void *);

/* pagezero_simple is default pagezero */
void (*pagezero)(void *p) = pagezero_simple;

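/*
 * Check whether the CPU implements the Privileged Access Never (PAN)
 * extension by reading the relevant field of ID_AA64MMFR1_EL1.
 */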
static void
pan_setup(void)
{
        uint64_t id_aa64mfr1;

        id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
        if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
                has_pan = 1;
}

void
pan_enable(void)
{

        /*
         * The LLVM integrated assembler doesn't understand the PAN
         * PSTATE field. Because of this we need to manually create
         * the instruction in an asm block. This is equivalent to:
         * msr pan, #1
         *
         * This sets the PAN bit, stopping the kernel from accessing
         * memory when userspace can also access it unless the kernel
         * uses the userspace load/store instructions.
         */
        if (has_pan) {
                WRITE_SPECIALREG(sctlr_el1,
                    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
                __asm __volatile(".inst 0xd500409f | (0x1 << 8)");
        }
}

static void
cpu_startup(void *dummy)
{

        undef_init();
        identify_cpu();

        vm_ksubmap_init(&kmi);
        bufinit();
        vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

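/*
 * Copy a thread's general purpose register state from its trap frame into
 * the given struct reg (used by the ptrace(2) and core dump code).
 */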
int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        regs->sp = frame->tf_sp;
        regs->lr = frame->tf_lr;
        regs->elr = frame->tf_elr;
        regs->spsr = frame->tf_spsr;

        memcpy(regs->x, frame->tf_x, sizeof(regs->x));

        return (0);
}

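/*
 * Update a thread's register state from the given struct reg. Only the
 * condition flags (PSR_FLAGS) of the saved spsr may be changed.
 */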
int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *frame;

        frame = td->td_frame;
        frame->tf_sp = regs->sp;
        frame->tf_lr = regs->lr;
        frame->tf_elr = regs->elr;
        frame->tf_spsr &= ~PSR_FLAGS;
        frame->tf_spsr |= regs->spsr & PSR_FLAGS;

        memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

        return (0);
}

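/*
 * Copy a thread's VFP (floating-point/SIMD) register state into the given
 * struct fpreg, or return zeroed registers if the thread has not started
 * using the VFP unit.
 */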
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                if (td == curthread)
                        vfp_save_state(td, pcb);

                KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
                    ("Called fill_fpregs while the kernel is using the VFP"));
                memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
                    sizeof(regs->fp_q));
                regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
                regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
        } else
#endif
                memset(regs->fp_q, 0, sizeof(regs->fp_q));
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
        struct pcb *pcb;

        pcb = td->td_pcb;
        KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
            ("Called set_fpregs while the kernel is using the VFP"));
        memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
        pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
        pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
        return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: fill_dbregs");
        return (EDOOFUS);
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

        printf("ARM64TODO: set_dbregs");
        return (EDOOFUS);
}

#ifdef COMPAT_FREEBSD32
int
fill_regs32(struct thread *td, struct reg32 *regs)
{

        printf("ARM64TODO: fill_regs32");
        return (EDOOFUS);
}

int
set_regs32(struct thread *td, struct reg32 *regs)
{

        printf("ARM64TODO: set_regs32");
        return (EDOOFUS);
}

int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

        printf("ARM64TODO: fill_fpregs32");
        return (EDOOFUS);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

        printf("ARM64TODO: set_fpregs32");
        return (EDOOFUS);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

        printf("ARM64TODO: fill_dbregs32");
        return (EDOOFUS);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

        printf("ARM64TODO: set_dbregs32");
        return (EDOOFUS);
}
#endif

int
ptrace_set_pc(struct thread *td, u_long addr)
{

        printf("ARM64TODO: ptrace_set_pc");
        return (EDOOFUS);
}

int
ptrace_single_step(struct thread *td)
{

        td->td_frame->tf_spsr |= PSR_SS;
        td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

        td->td_frame->tf_spsr &= ~PSR_SS;
        td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
        return (0);
}

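/*
 * Reset the register state for a thread that is about to execute a new
 * image: clear the trap frame, pass the stack pointer in x0 and start
 * execution at the image entry point.
 */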
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf = td->td_frame;

        memset(tf, 0, sizeof(struct trapframe));

        tf->tf_x[0] = stack;
        tf->tf_sp = STACKALIGN(stack);
        tf->tf_lr = imgp->entry_addr;
        tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

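/*
 * Store the machine context of the given thread for use by a signal
 * handler or getcontext(2). With GET_MC_CLEAR_RET the return register x0
 * is zeroed and the carry flag cleared so that an interrupted system call
 * appears to have succeeded when the context is resumed.
 */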
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
        struct trapframe *tf = td->td_frame;

        if (clear_ret & GET_MC_CLEAR_RET) {
                mcp->mc_gpregs.gp_x[0] = 0;
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
        } else {
                mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
                mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
        }

        memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
            sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

        mcp->mc_gpregs.gp_sp = tf->tf_sp;
        mcp->mc_gpregs.gp_lr = tf->tf_lr;
        mcp->mc_gpregs.gp_elr = tf->tf_elr;

        return (0);
}

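/*
 * Install a machine context supplied by userland. The saved spsr is
 * rejected unless it describes an AArch64 EL0 state with no exceptions
 * masked, so a process cannot escalate its privileges here.
 */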
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
        struct trapframe *tf = td->td_frame;
        uint32_t spsr;

        spsr = mcp->mc_gpregs.gp_spsr;
        if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
            (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
                return (EINVAL);

        memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

        tf->tf_sp = mcp->mc_gpregs.gp_sp;
        tf->tf_lr = mcp->mc_gpregs.gp_lr;
        tf->tf_elr = mcp->mc_gpregs.gp_elr;
        tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

        return (0);
}

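/*
 * Save the current thread's VFP state into the machine context, if the
 * thread has used the VFP unit. Runs in a critical section so the state
 * cannot change under us while it is being copied.
 */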
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        curpcb = curthread->td_pcb;

        if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
                /*
                 * If we have just been running VFP instructions we will
                 * need to save the state to memcpy it below.
                 */
                vfp_save_state(td, curpcb);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called get_fpcontext while the kernel is using the VFP"));
                KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
                    ("Non-userspace FPU flags set in get_fpcontext"));
                memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
                    sizeof(mcp->mc_fpregs.fp_q));
                mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
                mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
                mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
                mcp->mc_flags |= _MC_FP_VALID;
        }

        critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
        struct pcb *curpcb;

        critical_enter();

        if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
                curpcb = curthread->td_pcb;

                /*
                 * Discard any vfp state for the current thread, we
                 * are about to override it.
                 */
                vfp_discard(td);

                KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
                    ("Called set_fpcontext while the kernel is using the VFP"));
                memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
                    sizeof(mcp->mc_fpregs.fp_q));
                curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
                curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
                curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
        }

        critical_exit();
#endif
}

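/*
 * Idle the CPU until the next interrupt arrives using a "dsb; wfi"
 * sequence. When the scheduler is not busy-polling the periodic clock
 * interrupt is also stopped while idle.
 */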
void
cpu_idle(int busy)
{

        spinlock_enter();
        if (!busy)
                cpu_idleclock();
        if (!sched_runnable())
                __asm __volatile(
                    "dsb sy \n"
                    "wfi    \n");
        if (!busy)
                cpu_activeclock();
        spinlock_exit();
}

void
cpu_halt(void)
{

        /* We should have shutdown by now, if not enter a low power sleep */
        intr_disable();
        while (1) {
                __asm __volatile("wfi");
        }
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        /* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
        struct pcpu *pc;

        pc = pcpu_find(cpu_id);
        if (pc == NULL || rate == NULL)
                return (EINVAL);

        if (pc->pc_clock == 0)
                return (EOPNOTSUPP);

        *rate = pc->pc_clock;
        return (0);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_acpi_id = 0xffffffff;
}

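/*
 * Spinlock enter/exit: on first entry the DAIF interrupt mask is saved
 * and interrupts are disabled; nested calls only bump the per-thread
 * count, and the saved mask is restored when the last lock is dropped.
 */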
void
spinlock_enter(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                daif = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_daif = daif;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        register_t daif;

        td = curthread;
        critical_exit();
        daif = td->td_md.md_saved_daif;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(daif);
}

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
        ucontext_t *sigcntxp;
};
#endif

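/*
 * sigreturn(2): restore the register, floating-point and signal mask
 * state that sendsig() saved in the ucontext on the user stack.
 */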
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
        ucontext_t uc;
        int error;

        if (uap == NULL)
                return (EFAULT);
        if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
                return (EFAULT);

        error = set_mcontext(td, &uc.uc_mcontext);
        if (error != 0)
                return (error);
        set_fpcontext(td, &uc.uc_mcontext);

        /* Restore signal mask. */
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        int i;

        for (i = 0; i < PCB_LR; i++)
                pcb->pcb_x[i] = tf->tf_x[i];

        pcb->pcb_x[PCB_LR] = tf->tf_lr;
        pcb->pcb_pc = tf->tf_elr;
        pcb->pcb_sp = tf->tf_sp;
}

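/*
 * Deliver a signal to the current thread: build a struct sigframe
 * containing the saved machine and VFP context, copy it to the user
 * stack (or the alternate signal stack) and rewrite the trap frame so
 * execution resumes in the handler with the signal trampoline as its
 * return address.
 */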
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct thread *td;
        struct proc *p;
        struct trapframe *tf;
        struct sigframe *fp, frame;
        struct sigacts *psp;
        struct sysentvec *sysent;
        int code, onstack, sig;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);

        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);

        tf = td->td_frame;
        onstack = sigonstack(tf->tf_sp);

        CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
            catcher, sig);

        /* Allocate and validate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else {
                fp = (struct sigframe *)td->td_frame->tf_sp;
        }

        /* Make room, keeping the stack aligned */
        fp--;
        fp = (struct sigframe *)STACKALIGN(fp);

        /* Fill in the frame to copy out */
        get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
        get_fpcontext(td, &frame.sf_uc.uc_mcontext);
        frame.sf_si = ksi->ksi_info;
        frame.sf_uc.uc_sigmask = *mask;
        frame.sf_uc.uc_stack = td->td_sigstk;
        frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
            ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(td->td_proc);

        /* Copy the sigframe out to the user's stack. */
        if (copyout(&frame, fp, sizeof(*fp)) != 0) {
                /* Process has trashed its stack. Kill it. */
                CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        tf->tf_x[0] = sig;
        tf->tf_x[1] = (register_t)&fp->sf_si;
        tf->tf_x[2] = (register_t)&fp->sf_uc;

        tf->tf_elr = (register_t)catcher;
        tf->tf_sp = (register_t)fp;
        sysent = p->p_sysent;
        if (sysent->sv_sigcode_base != 0)
                tf->tf_lr = (register_t)sysent->sv_sigcode_base;
        else
                tf->tf_lr = (register_t)(sysent->sv_psstrings -
                    *(sysent->sv_szsigcode));

        CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
            tf->tf_sp);

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

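/*
 * Finish initialising proc0/thread0: record the bootstrap kernel stack,
 * locate thread0's PCB and trap frame, and point CPU 0's curpcb at it.
 */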
static void
init_proc0(vm_offset_t kstack)
{
        struct pcpu *pcpup = &__pcpu[0];

        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
        thread0.td_pcb->pcb_fpflags = 0;
        thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
        thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
        uint32_t type;
        uint64_t phys_start;
        uint64_t virt_start;
        uint64_t num_pages;
        uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;

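/*
 * Add a [base, base + length) region to the physmap table, which stores
 * start/end pairs in ascending order. Adjacent regions are merged and
 * overlapping regions are ignored. Returns 0 once the table is full.
 */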
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        u_int i, insert_idx, _physmap_idx;

        _physmap_idx = *physmap_idxp;

        if (length == 0)
                return (1);

        /*
         * Find insertion point while checking for overlap.  Start off by
         * assuming the new entry will be added to the end.
         */
        insert_idx = _physmap_idx;
        for (i = 0; i <= _physmap_idx; i += 2) {
                if (base < physmap[i + 1]) {
                        if (base + length <= physmap[i]) {
                                insert_idx = i;
                                break;
                        }
                        if (boothowto & RB_VERBOSE)
                                printf(
                    "Overlapping memory regions, ignoring second region\n");
                        return (1);
                }
        }

        /* See if we can prepend to the next entry. */
        if (insert_idx <= _physmap_idx &&
            base + length == physmap[insert_idx]) {
                physmap[insert_idx] = base;
                return (1);
        }

        /* See if we can append to the previous entry. */
        if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
                physmap[insert_idx - 1] += length;
                return (1);
        }

        _physmap_idx += 2;
        *physmap_idxp = _physmap_idx;
        if (_physmap_idx == PHYSMAP_SIZE) {
                printf(
                "Too many segments in the physical address map, giving up\n");
                return (0);
        }

        /*
         * Move the last 'N' entries down to make room for the new
         * entry if needed.
         */
        for (i = _physmap_idx; i > insert_idx; i -= 2) {
                physmap[i] = physmap[i - 2];
                physmap[i + 1] = physmap[i - 1];
        }

        /* Insert the new entry. */
        physmap[insert_idx] = base;
        physmap[insert_idx + 1] = base + length;
        return (1);
}

#ifdef FDT
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{

        for (int i = 0; i < mrcnt; i++) {
                if (!add_physmap_entry(mr[i].mr_start, mr[i].mr_size, physmap,
                    physmap_idxp))
                        break;
        }
}
#endif

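/*
 * Walk the UEFI memory map handed over by the loader, optionally printing
 * it during a verbose boot, and add every usable (loader, boot services
 * and conventional memory) region to the physmap.
 */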
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
        struct efi_md *map, *p;
        const char *type;
        size_t efisz;
        int ndesc, i;

        static const char *types[] = {
                "Reserved",
                "LoaderCode",
                "LoaderData",
                "BootServicesCode",
                "BootServicesData",
                "RuntimeServicesCode",
                "RuntimeServicesData",
                "ConventionalMemory",
                "UnusableMemory",
                "ACPIReclaimMemory",
                "ACPIMemoryNVS",
                "MemoryMappedIO",
                "MemoryMappedIOPortSpace",
                "PalCode",
                "PersistentMemory"
        };

        /*
         * Memory map data provided by UEFI via the GetMemoryMap
         * Boot Services API.
         */
        efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
        map = (struct efi_md *)((uint8_t *)efihdr + efisz);

        if (efihdr->descriptor_size == 0)
                return;
        ndesc = efihdr->memory_size / efihdr->descriptor_size;

        if (boothowto & RB_VERBOSE)
                printf("%23s %12s %12s %8s %4s\n",
                    "Type", "Physical", "Virtual", "#Pages", "Attr");

        for (i = 0, p = map; i < ndesc; i++,
            p = efi_next_descriptor(p, efihdr->descriptor_size)) {
                if (boothowto & RB_VERBOSE) {
                        if (p->md_type < nitems(types))
                                type = types[p->md_type];
                        else
                                type = "<INVALID>";
                        printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                            p->md_virt, p->md_pages);
                        if (p->md_attr & EFI_MD_ATTR_UC)
                                printf("UC ");
                        if (p->md_attr & EFI_MD_ATTR_WC)
                                printf("WC ");
                        if (p->md_attr & EFI_MD_ATTR_WT)
                                printf("WT ");
                        if (p->md_attr & EFI_MD_ATTR_WB)
                                printf("WB ");
                        if (p->md_attr & EFI_MD_ATTR_UCE)
                                printf("UCE ");
                        if (p->md_attr & EFI_MD_ATTR_WP)
                                printf("WP ");
                        if (p->md_attr & EFI_MD_ATTR_RP)
                                printf("RP ");
                        if (p->md_attr & EFI_MD_ATTR_XP)
                                printf("XP ");
                        if (p->md_attr & EFI_MD_ATTR_NV)
                                printf("NV ");
                        if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
                                printf("MORE_RELIABLE ");
                        if (p->md_attr & EFI_MD_ATTR_RO)
                                printf("RO ");
                        if (p->md_attr & EFI_MD_ATTR_RT)
                                printf("RUNTIME");
                        printf("\n");
                }

                switch (p->md_type) {
                case EFI_MD_TYPE_CODE:
                case EFI_MD_TYPE_DATA:
                case EFI_MD_TYPE_BS_CODE:
                case EFI_MD_TYPE_BS_DATA:
                case EFI_MD_TYPE_FREE:
                        /*
                         * We're allowed to use any entry with these types.
                         */
                        break;
                default:
                        continue;
                }

                if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
                    physmap, physmap_idxp))
                        break;
        }
}

#ifdef FDT
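/*
 * Locate the device tree blob passed in by the loader and hand it to the
 * Open Firmware / FDT code.
 */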
static void
try_load_dtb(caddr_t kmdp)
{
        vm_offset_t dtbp;

        dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
        if (dtbp == (vm_offset_t)NULL) {
                printf("ERROR loading DTB\n");
                return;
        }

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");
}
#endif

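/*
 * Decide whether devices will be enumerated via FDT or ACPI. The
 * kern.cfg.order environment variable is honoured when it names an
 * available method; otherwise FDT is preferred over ACPI. Returns false
 * when an order was requested but could not be satisfied.
 */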
static bool
bus_probe(void)
{
        bool has_acpi, has_fdt;
        char *order, *env;

        has_acpi = has_fdt = false;

#ifdef FDT
        has_fdt = (OF_peer(0) != 0);
#endif
#ifdef DEV_ACPI
        has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
#endif

        env = kern_getenv("kern.cfg.order");
        if (env != NULL) {
                order = env;
                while (order != NULL) {
                        if (has_acpi &&
                            strncmp(order, "acpi", 4) == 0 &&
                            (order[4] == ',' || order[4] == '\0')) {
                                arm64_bus_method = ARM64_BUS_ACPI;
                                break;
                        }
                        if (has_fdt &&
                            strncmp(order, "fdt", 3) == 0 &&
                            (order[3] == ',' || order[3] == '\0')) {
                                arm64_bus_method = ARM64_BUS_FDT;
                                break;
                        }
                        /* Skip past the comma to the next method name. */
                        order = strchr(order, ',');
                        if (order != NULL)
                                order++;
                }
                freeenv(env);

                /* If we set the bus method it is valid */
                if (arm64_bus_method != ARM64_BUS_NONE)
                        return (true);
        }
        /* If no order or an invalid order was set use the default */
        if (arm64_bus_method == ARM64_BUS_NONE) {
                if (has_fdt)
                        arm64_bus_method = ARM64_BUS_FDT;
                else if (has_acpi)
                        arm64_bus_method = ARM64_BUS_ACPI;
        }

        /*
         * If no option was set the default is valid, otherwise we are
         * setting one to get cninit() working, then calling panic to tell
         * the user about the invalid bus setup.
         */
        return (env == NULL);
}

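/*
 * Discover the cache geometry from CTR_EL0 and DCZID_EL0: the minimum
 * I/D cache line sizes and the block size zeroed by "dc zva". When zva
 * is permitted the cache-assisted pagezero implementation is selected.
 */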
static void
cache_setup(void)
{
        int dcache_line_shift, icache_line_shift, dczva_line_shift;
        uint32_t ctr_el0;
        uint32_t dczid_el0;

        ctr_el0 = READ_SPECIALREG(ctr_el0);

        /* Read the log2 words in each D cache line */
        dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
        /* Get the D cache line size */
        dcache_line_size = sizeof(int) << dcache_line_shift;

        /* And the same for the I cache */
        icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
        icache_line_size = sizeof(int) << icache_line_shift;

        idcache_line_size = MIN(dcache_line_size, icache_line_size);

        dczid_el0 = READ_SPECIALREG(dczid_el0);

        /* Check if dc zva is not prohibited */
        if (dczid_el0 & DCZID_DZP)
                dczva_line_size = 0;
        else {
                /* Same as with above calculations */
                dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
                dczva_line_size = sizeof(int) << dczva_line_shift;

                /* Change pagezero function */
                pagezero = pagezero_cache;
        }
}

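/*
 * Machine-dependent early boot setup: parse the loader metadata, install
 * the device tree, build the physical memory map, bootstrap the pmap and
 * per-CPU data, probe the platform bus and bring up the console before
 * handing over to the machine independent startup code.
 */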
void
initarm(struct arm64_bootparams *abp)
{
        struct efi_map_header *efihdr;
        struct pcpu *pcpup;
        char *env;
#ifdef FDT
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        int mem_regions_sz;
#endif
        vm_offset_t lastaddr;
        caddr_t kmdp;
        vm_paddr_t mem_len;
        bool valid;
        int i;

        /* Set the module data location */
        preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

        /* Find the kernel address */
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");

        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
        init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
        try_load_dtb(kmdp);
#endif

        efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

        /* Find the address to start allocating from */
        lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

        /* Load the physical memory ranges */
        physmap_idx = 0;
        efihdr = (struct efi_map_header *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL)
                add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
        else {
                /* Grab physical memory regions information from device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
                    NULL) != 0)
                        panic("Cannot get physical memory regions");
                add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
                    &physmap_idx);
        }
#endif

        /* Build the dump_avail map from the physical memory map */
        mem_len = 0;
        for (i = 0; i < physmap_idx; i += 2) {
                dump_avail[i] = physmap[i];
                dump_avail[i + 1] = physmap[i + 1];
                mem_len += physmap[i + 1] - physmap[i];
        }
        dump_avail[i] = 0;
        dump_avail[i + 1] = 0;

        /* Set the pcpu data, this is needed by pmap_bootstrap */
        pcpup = &__pcpu[0];
        pcpu_init(pcpup, 0, sizeof(struct pcpu));

        /*
         * Set the pcpu pointer with a backup in tpidr_el1 to be
         * loaded when entering the kernel from userland.
         */
        __asm __volatile(
            "mov x18, %0 \n"
            "msr tpidr_el1, %0" :: "r"(pcpup));

        PCPU_SET(curthread, &thread0);

        /* Do basic tuning, hz etc */
        init_param1();

        cache_setup();
        pan_setup();

        /* Bootstrap enough of pmap to enter the kernel proper */
        pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
            KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

        devmap_bootstrap(0, NULL);

        valid = bus_probe();

        cninit();

        if (!valid)
                panic("Invalid bus configuration: %s",
                    kern_getenv("kern.cfg.order"));

        init_proc0(abp->kern_stack);
        msgbufinit(msgbufp, msgbufsize);
        mutex_init();
        init_param2(physmem);

        dbg_init();
        kdb_init();
        pan_enable();

        env = kern_getenv("kernelname");
        if (env != NULL)
                strlcpy(kernelname, env, sizeof(kernelname));

        early_boot = 0;
}

void
dbg_init(void)
{

        /* Clear OS lock */
        WRITE_SPECIALREG(OSLAR_EL1, 0);

        /* This permits DDB to use debug registers for watchpoints. */
        dbg_monitor_init();

        /* TODO: Eventually will need to initialize debug registers here. */
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define PRINT_REG(reg)  \
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

        PRINT_REG(actlr_el1);
        PRINT_REG(afsr0_el1);
        PRINT_REG(afsr1_el1);
        PRINT_REG(aidr_el1);
        PRINT_REG(amair_el1);
        PRINT_REG(ccsidr_el1);
        PRINT_REG(clidr_el1);
        PRINT_REG(contextidr_el1);
        PRINT_REG(cpacr_el1);
        PRINT_REG(csselr_el1);
        PRINT_REG(ctr_el0);
        PRINT_REG(currentel);
        PRINT_REG(daif);
        PRINT_REG(dczid_el0);
        PRINT_REG(elr_el1);
        PRINT_REG(esr_el1);
        PRINT_REG(far_el1);
#if 0
        /* ARM64TODO: Enable VFP before reading floating-point registers */
        PRINT_REG(fpcr);
        PRINT_REG(fpsr);
#endif
        PRINT_REG(id_aa64afr0_el1);
        PRINT_REG(id_aa64afr1_el1);
        PRINT_REG(id_aa64dfr0_el1);
        PRINT_REG(id_aa64dfr1_el1);
        PRINT_REG(id_aa64isar0_el1);
        PRINT_REG(id_aa64isar1_el1);
        PRINT_REG(id_aa64pfr0_el1);
        PRINT_REG(id_aa64pfr1_el1);
        PRINT_REG(id_afr0_el1);
        PRINT_REG(id_dfr0_el1);
        PRINT_REG(id_isar0_el1);
        PRINT_REG(id_isar1_el1);
        PRINT_REG(id_isar2_el1);
        PRINT_REG(id_isar3_el1);
        PRINT_REG(id_isar4_el1);
        PRINT_REG(id_isar5_el1);
        PRINT_REG(id_mmfr0_el1);
        PRINT_REG(id_mmfr1_el1);
        PRINT_REG(id_mmfr2_el1);
        PRINT_REG(id_mmfr3_el1);
#if 0
        /* Missing from llvm */
        PRINT_REG(id_mmfr4_el1);
#endif
        PRINT_REG(id_pfr0_el1);
        PRINT_REG(id_pfr1_el1);
        PRINT_REG(isr_el1);
        PRINT_REG(mair_el1);
        PRINT_REG(midr_el1);
        PRINT_REG(mpidr_el1);
        PRINT_REG(mvfr0_el1);
        PRINT_REG(mvfr1_el1);
        PRINT_REG(mvfr2_el1);
        PRINT_REG(revidr_el1);
        PRINT_REG(sctlr_el1);
        PRINT_REG(sp_el0);
        PRINT_REG(spsel);
        PRINT_REG(spsr_el1);
        PRINT_REG(tcr_el1);
        PRINT_REG(tpidr_el0);
        PRINT_REG(tpidr_el1);
        PRINT_REG(tpidrro_el0);
        PRINT_REG(ttbr0_el1);
        PRINT_REG(ttbr1_el1);
        PRINT_REG(vbar_el1);
#undef PRINT_REG
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
        uint64_t phys;

        if (have_addr) {
                phys = arm64_address_translate_s1e1r(addr);
                db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e1w(addr);
                db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0r(addr);
                db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
                phys = arm64_address_translate_s1e0w(addr);
                db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
        } else
                db_printf("show vtop <virt_addr>\n");
}
#endif