/* sys/arm64/arm64/machdep.c */
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27
28 #include "opt_acpi.h"
29 #include "opt_compat.h"
30 #include "opt_platform.h"
31 #include "opt_ddb.h"
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/buf.h>
39 #include <sys/bus.h>
40 #include <sys/cons.h>
41 #include <sys/cpu.h>
42 #include <sys/devmap.h>
43 #include <sys/efi.h>
44 #include <sys/exec.h>
45 #include <sys/imgact.h>
46 #include <sys/kdb.h> 
47 #include <sys/kernel.h>
48 #include <sys/limits.h>
49 #include <sys/linker.h>
50 #include <sys/msgbuf.h>
51 #include <sys/pcpu.h>
52 #include <sys/proc.h>
53 #include <sys/ptrace.h>
54 #include <sys/reboot.h>
55 #include <sys/rwlock.h>
56 #include <sys/sched.h>
57 #include <sys/signalvar.h>
58 #include <sys/syscallsubr.h>
59 #include <sys/sysent.h>
60 #include <sys/sysproto.h>
61 #include <sys/ucontext.h>
62 #include <sys/vdso.h>
63
64 #include <vm/vm.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_page.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_pager.h>
71
72 #include <machine/armreg.h>
73 #include <machine/cpu.h>
74 #include <machine/debug_monitor.h>
75 #include <machine/kdb.h>
76 #include <machine/machdep.h>
77 #include <machine/metadata.h>
78 #include <machine/md_var.h>
79 #include <machine/pcb.h>
80 #include <machine/reg.h>
81 #include <machine/undefined.h>
82 #include <machine/vmparam.h>
83
84 #ifdef VFP
85 #include <machine/vfp.h>
86 #endif
87
88 #ifdef DEV_ACPI
89 #include <contrib/dev/acpica/include/acpi.h>
90 #include <machine/acpica_machdep.h>
91 #endif
92
93 #ifdef FDT
94 #include <dev/fdt/fdt_common.h>
95 #include <dev/ofw/openfirm.h>
96 #endif
97
98
99 enum arm64_bus arm64_bus_method = ARM64_BUS_NONE;
100
101 struct pcpu __pcpu[MAXCPU];
102
103 static struct trapframe proc0_tf;
104
105 vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
106 vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];
107
108 int early_boot = 1;
109 int cold = 1;
110 long realmem = 0;
111 long Maxmem = 0;
112
113 #define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
114 vm_paddr_t physmap[PHYSMAP_SIZE];
115 u_int physmap_idx;
116
117 struct kva_md_info kmi;
118
119 int64_t dcache_line_size;       /* The minimum D cache line size */
120 int64_t icache_line_size;       /* The minimum I cache line size */
121 int64_t idcache_line_size;      /* The minimum cache line size */
122 int64_t dczva_line_size;        /* The size of cache line the dc zva zeroes */
123 int has_pan;
124
125 /*
126  * Physical address of the EFI System Table. Stashed from the metadata hints
127  * passed into the kernel and used by the EFI code to call runtime services.
128  */
129 vm_paddr_t efi_systbl_phys;
130
131 /* pagezero_* implementations are provided in support.S */
132 void pagezero_simple(void *);
133 void pagezero_cache(void *);
134
135 /* pagezero_simple is default pagezero */
136 void (*pagezero)(void *p) = pagezero_simple;
137
138 static void
139 pan_setup(void)
140 {
141         uint64_t id_aa64mfr1;
142
143         id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
144         if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE)
145                 has_pan = 1;
146 }
147
/*
 * Turn on PAN if pan_setup() detected it.  Clears SCTLR_EL1.SPAN so PAN is
 * set on exception entry, then sets the PSTATE.PAN bit directly.
 */
void
pan_enable(void)
{

	/*
	 * The LLVM integrated assembler doesn't understand the PAN
	 * PSTATE field. Because of this we need to manually create
	 * the instruction in an asm block. This is equivalent to:
	 * msr pan, #1
	 *
	 * This sets the PAN bit, stopping the kernel from accessing
	 * memory when userspace can also access it unless the kernel
	 * uses the userspace load/store instructions.
	 */
	if (has_pan) {
		WRITE_SPECIALREG(sctlr_el1,
		    READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN);
		/* Hand-encoded "msr pan, #1" (0xd500419f). */
		__asm __volatile(".inst 0xd500409f | (0x1 << 8)");
	}
}
168
/*
 * Early CPU setup run at SI_SUB_CPU: install undefined-instruction hooks,
 * identify the CPU, apply errata workarounds, then finish kernel VM and
 * buffer-cache initialization.
 */
static void
cpu_startup(void *dummy)
{

	undef_init();
	identify_cpu();
	install_cpu_errata();

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
183
/*
 * Wake an idle CPU.  Nothing to do here: cpu_idle() uses wfi, and any
 * interrupt (e.g. the scheduler IPI) already wakes the core.
 */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
190
/*
 * Copy the general-purpose register state of 'td' from its trapframe into
 * the ptrace/coredump 'struct reg'.  Always succeeds.
 */
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sp = frame->tf_sp;
	regs->lr = frame->tf_lr;
	regs->elr = frame->tf_elr;
	regs->spsr = frame->tf_spsr;

	/* x registers; the CTASSERTs below check the arrays are the same size. */
	memcpy(regs->x, frame->tf_x, sizeof(regs->x));

	return (0);
}
206
/*
 * Install a ptrace-supplied register set into the thread's trapframe.
 * Only the condition-flag bits (PSR_FLAGS) of spsr are writable; the
 * privileged mode/mask bits are preserved.  Always succeeds.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sp = regs->sp;
	frame->tf_lr = regs->lr;
	frame->tf_elr = regs->elr;
	frame->tf_spsr &= ~PSR_FLAGS;
	frame->tf_spsr |= regs->spsr & PSR_FLAGS;

	memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

	return (0);
}
223
/*
 * Copy the thread's FP/SIMD state into a ptrace/coredump 'struct fpreg'.
 * If the FPU has not been used (or the kernel lacks VFP support) the
 * register block is zeroed instead.  Always returns 0.
 */
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			vfp_save_state(td, pcb);

		KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
		    ("Called fill_fpregs while the kernel is using the VFP"));
		memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs,
		    sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr;
		regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr;
	} else
#endif
		memset(regs->fp_q, 0, sizeof(regs->fp_q));
	return (0);
}
250
/*
 * Install a ptrace-supplied FP/SIMD register set into the thread's PCB.
 * A no-op (still returning 0) when the kernel is built without VFP.
 */
int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Called set_fpregs while the kernel is using the VFP"));
	memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr;
	pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr;
#endif
	return (0);
}
266
/* Unimplemented: reading hardware debug registers via ptrace. */
int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	printf("ARM64TODO: fill_dbregs");
	return (EDOOFUS);
}
274
/* Unimplemented: writing hardware debug registers via ptrace. */
int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	printf("ARM64TODO: set_dbregs");
	return (EDOOFUS);
}
282
#ifdef COMPAT_FREEBSD32
/*
 * 32-bit (COMPAT_FREEBSD32) ptrace register accessors.  All currently
 * unimplemented stubs that log and return EDOOFUS.
 */
int
fill_regs32(struct thread *td, struct reg32 *regs)
{

	printf("ARM64TODO: fill_regs32");
	return (EDOOFUS);
}

int
set_regs32(struct thread *td, struct reg32 *regs)
{

	printf("ARM64TODO: set_regs32");
	return (EDOOFUS);
}

int
fill_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	printf("ARM64TODO: fill_fpregs32");
	return (EDOOFUS);
}

int
set_fpregs32(struct thread *td, struct fpreg32 *regs)
{

	printf("ARM64TODO: set_fpregs32");
	return (EDOOFUS);
}

int
fill_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	printf("ARM64TODO: fill_dbregs32");
	return (EDOOFUS);
}

int
set_dbregs32(struct thread *td, struct dbreg32 *regs)
{

	printf("ARM64TODO: set_dbregs32");
	return (EDOOFUS);
}
#endif
332
/* Unimplemented: setting the program counter via ptrace. */
int
ptrace_set_pc(struct thread *td, u_long addr)
{

	printf("ARM64TODO: ptrace_set_pc");
	return (EDOOFUS);
}
340
/*
 * Enable hardware single-step for the thread by setting the Software Step
 * bit in the saved PSTATE and flagging the PCB so the step is maintained.
 */
int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_spsr |= PSR_SS;
	td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
	return (0);
}
349
/* Disable hardware single-step: inverse of ptrace_single_step(). */
int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_spsr &= ~PSR_SS;
	td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
	return (0);
}
358
/*
 * Reset the thread's register state for a new executable image: zero the
 * trapframe, point x0 at the stack arguments, align the stack pointer,
 * and set both lr and elr to the image entry point.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
	struct trapframe *tf = td->td_frame;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_x[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_lr = imgp->entry_addr;
	tf->tf_elr = imgp->entry_addr;
}
371
/*
 * Sanity check these are the same size, they will be memcpy'd to and fro
 * (see fill_regs/set_regs and get_mcontext/set_mcontext).
 */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);
377
/*
 * Export the thread's machine context for signal delivery / getcontext().
 * When GET_MC_CLEAR_RET is set, x0 and the carry flag are cleared in the
 * saved copy — presumably so a restarted syscall reports success; confirm
 * against the syscall return path.  Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_x[0] = 0;
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
	} else {
		mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
		mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
	}

	/* x0 was handled above, so copy only x1..x(n-1). */
	memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
	    sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_lr = tf->tf_lr;
	mcp->mc_gpregs.gp_elr = tf->tf_elr;

	return (0);
}
400
/*
 * Install a (possibly user-supplied) machine context into the thread's
 * trapframe.  Rejects with EINVAL any spsr that is not EL0t or that tries
 * to set AArch32 mode or the exception-mask bits (F/I/A/D), preventing
 * privilege escalation through sigreturn()/setcontext().
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf = td->td_frame;
	uint32_t spsr;

	spsr = mcp->mc_gpregs.gp_spsr;
	if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
	    (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
		return (EINVAL);

	memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_lr = mcp->mc_gpregs.gp_lr;
	tf->tf_elr = mcp->mc_gpregs.gp_elr;
	tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

	return (0);
}
421
422 static void
423 get_fpcontext(struct thread *td, mcontext_t *mcp)
424 {
425 #ifdef VFP
426         struct pcb *curpcb;
427
428         critical_enter();
429
430         curpcb = curthread->td_pcb;
431
432         if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
433                 /*
434                  * If we have just been running VFP instructions we will
435                  * need to save the state to memcpy it below.
436                  */
437                 vfp_save_state(td, curpcb);
438
439                 KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
440                     ("Called get_fpcontext while the kernel is using the VFP"));
441                 KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
442                     ("Non-userspace FPU flags set in get_fpcontext"));
443                 memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs,
444                     sizeof(mcp->mc_fpregs));
445                 mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr;
446                 mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr;
447                 mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
448                 mcp->mc_flags |= _MC_FP_VALID;
449         }
450
451         critical_exit();
452 #endif
453 }
454
455 static void
456 set_fpcontext(struct thread *td, mcontext_t *mcp)
457 {
458 #ifdef VFP
459         struct pcb *curpcb;
460
461         critical_enter();
462
463         if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
464                 curpcb = curthread->td_pcb;
465
466                 /*
467                  * Discard any vfp state for the current thread, we
468                  * are about to override it.
469                  */
470                 vfp_discard(td);
471
472                 KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate,
473                     ("Called set_fpcontext while the kernel is using the VFP"));
474                 memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q,
475                     sizeof(mcp->mc_fpregs));
476                 curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr;
477                 curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr;
478                 curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
479         }
480
481         critical_exit();
482 #endif
483 }
484
/*
 * Idle loop body: wait for an interrupt with wfi when nothing is runnable.
 * The spinlock section keeps interrupts disabled so a wakeup between the
 * sched_runnable() check and wfi is not lost (wfi still completes on a
 * pending interrupt).  The dsb orders memory accesses before sleeping.
 */
void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "dsb sy \n"
		    "wfi    \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}
500
/*
 * Final halt path: disable interrupts and park the CPU forever in the
 * lowest-power wait state.
 */
void
cpu_halt(void)
{

	/* We should have shutdown by now, if not enter a low power sleep */
	intr_disable();
	for (;;)
		__asm __volatile("wfi");
}
511
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* ARM64TODO TBD -- intentionally a no-op for now. */
}
522
523 /* Get current clock frequency for the given CPU ID. */
524 int
525 cpu_est_clockrate(int cpu_id, uint64_t *rate)
526 {
527         struct pcpu *pc;
528
529         pc = pcpu_find(cpu_id);
530         if (pc == NULL || rate == NULL)
531                 return (EINVAL);
532
533         if (pc->pc_clock == 0)
534                 return (EOPNOTSUPP);
535
536         *rate = pc->pc_clock;
537         return (0);
538 }
539
/*
 * Per-CPU data initialization hook; marks the ACPI processor id unknown
 * until (if ever) ACPI enumeration fills it in.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}
546
/*
 * Enter a spinlock section: on the outermost entry, disable interrupts
 * and stash the previous DAIF state so spinlock_exit() can restore it.
 * Nested entries only bump the count.  Interrupts must be disabled
 * before the count is set so the section is never observed half-entered.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		daif = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_daif = daif;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}
562
/*
 * Leave a spinlock section; restores the interrupt state saved by the
 * matching outermost spinlock_enter().  The saved DAIF is read before
 * decrementing the count, mirroring the enter-side ordering.
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}
576
577 #ifndef _SYS_SYSPROTO_H_
578 struct sigreturn_args {
579         ucontext_t *ucp;
580 };
581 #endif
582
/*
 * sigreturn(2): restore the machine, FP and signal-mask state saved in the
 * user-supplied ucontext when a signal handler returns.  Returns
 * EJUSTRETURN so the syscall layer leaves the restored registers alone.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	/* NOTE(review): uap is supplied by the syscall layer and looks
	 * like it can never be NULL here; this check appears dead. */
	if (uap == NULL)
		return (EFAULT);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	/* set_mcontext() validates the user-supplied spsr. */
	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);
	set_fpcontext(td, &uc.uc_mcontext);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}
604
/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	int i;

	/* Callee-saved x registers up to (but not including) the LR slot. */
	for (i = 0; i < PCB_LR; i++)
		pcb->pcb_x[i] = tf->tf_x[i];

	pcb->pcb_x[PCB_LR] = tf->tf_lr;
	pcb->pcb_pc = tf->tf_elr;
	pcb->pcb_sp = tf->tf_sp;
}
624
625 void
626 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
627 {
628         struct thread *td;
629         struct proc *p;
630         struct trapframe *tf;
631         struct sigframe *fp, frame;
632         struct sigacts *psp;
633         struct sysentvec *sysent;
634         int onstack, sig;
635
636         td = curthread;
637         p = td->td_proc;
638         PROC_LOCK_ASSERT(p, MA_OWNED);
639
640         sig = ksi->ksi_signo;
641         psp = p->p_sigacts;
642         mtx_assert(&psp->ps_mtx, MA_OWNED);
643
644         tf = td->td_frame;
645         onstack = sigonstack(tf->tf_sp);
646
647         CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
648             catcher, sig);
649
650         /* Allocate and validate space for the signal handler context. */
651         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
652             SIGISMEMBER(psp->ps_sigonstack, sig)) {
653                 fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
654                     td->td_sigstk.ss_size);
655 #if defined(COMPAT_43)
656                 td->td_sigstk.ss_flags |= SS_ONSTACK;
657 #endif
658         } else {
659                 fp = (struct sigframe *)td->td_frame->tf_sp;
660         }
661
662         /* Make room, keeping the stack aligned */
663         fp--;
664         fp = (struct sigframe *)STACKALIGN(fp);
665
666         /* Fill in the frame to copy out */
667         get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
668         get_fpcontext(td, &frame.sf_uc.uc_mcontext);
669         frame.sf_si = ksi->ksi_info;
670         frame.sf_uc.uc_sigmask = *mask;
671         frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
672             ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
673         frame.sf_uc.uc_stack = td->td_sigstk;
674         mtx_unlock(&psp->ps_mtx);
675         PROC_UNLOCK(td->td_proc);
676
677         /* Copy the sigframe out to the user's stack. */
678         if (copyout(&frame, fp, sizeof(*fp)) != 0) {
679                 /* Process has trashed its stack. Kill it. */
680                 CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
681                 PROC_LOCK(p);
682                 sigexit(td, SIGILL);
683         }
684
685         tf->tf_x[0]= sig;
686         tf->tf_x[1] = (register_t)&fp->sf_si;
687         tf->tf_x[2] = (register_t)&fp->sf_uc;
688
689         tf->tf_elr = (register_t)catcher;
690         tf->tf_sp = (register_t)fp;
691         sysent = p->p_sysent;
692         if (sysent->sv_sigcode_base != 0)
693                 tf->tf_lr = (register_t)sysent->sv_sigcode_base;
694         else
695                 tf->tf_lr = (register_t)(sysent->sv_psstrings -
696                     *(sysent->sv_szsigcode));
697
698         CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
699             tf->tf_sp);
700
701         PROC_LOCK(p);
702         mtx_lock(&psp->ps_mtx);
703 }
704
/*
 * Hook up proc0/thread0: place the PCB at the top of the given kernel
 * stack, reset its FP state, and make it the current PCB on CPU 0.
 */
static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	/* PCB lives at the very top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
	/* UINT_MAX: FP state not resident on any CPU. */
	thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}
719
/*
 * Layout of one UEFI memory-map descriptor as handed over by the loader;
 * mirrors the UEFI spec's EFI_MEMORY_DESCRIPTOR.
 */
typedef struct {
	uint32_t type;
	uint64_t phys_start;
	uint64_t virt_start;
	uint64_t num_pages;
	uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;
727
/*
 * Insert the range [base, base + length) into the physmap array, which is
 * kept as sorted (start, end) pairs.  Coalesces with an adjacent entry
 * when possible.  Returns 1 on success (including a harmlessly ignored
 * zero-length or overlapping range), 0 when the table is full and the
 * caller should stop adding entries.
 */
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	u_int i, insert_idx, _physmap_idx;

	_physmap_idx = *physmap_idxp;

	if (length == 0)
		return (1);

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = _physmap_idx;
	for (i = 0; i <= _physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
				insert_idx = i;
				break;
			}
			if (boothowto & RB_VERBOSE)
				printf(
		    "Overlapping memory regions, ignoring second region\n");
			return (1);
		}
	}

	/* See if we can prepend to the next entry. */
	if (insert_idx <= _physmap_idx &&
	    base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;
		return (1);
	}

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;
		return (1);
	}

	_physmap_idx += 2;
	*physmap_idxp = _physmap_idx;
	if (_physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}

	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = _physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];
	}

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;
	return (1);
}
792
#ifdef FDT
/*
 * Feed each FDT-described memory region into the physmap, stopping early
 * if add_physmap_entry() reports that the table is full.
 */
static void
add_fdt_mem_regions(struct mem_region *mr, int mrcnt, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	int idx;

	for (idx = 0; idx < mrcnt; idx++) {
		if (add_physmap_entry(mr[idx].mr_start, mr[idx].mr_size,
		    physmap, physmap_idxp) == 0)
			break;
	}
}
#endif
806
/*
 * Walk the UEFI memory map supplied by the loader, optionally printing it
 * (RB_VERBOSE), and add every usable entry (loader/boot-services code and
 * data, conventional memory) to the physmap.
 */
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
	struct efi_md *map, *p;
	const char *type;
	size_t efisz;
	int ndesc, i;

	/* Printable names indexed by EFI memory-descriptor type. */
	static const char *types[] = {
		"Reserved",
		"LoaderCode",
		"LoaderData",
		"BootServicesCode",
		"BootServicesData",
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"UnusableMemory",
		"ACPIReclaimMemory",
		"ACPIMemoryNVS",
		"MemoryMappedIO",
		"MemoryMappedIOPortSpace",
		"PalCode",
		"PersistentMemory"
	};

	/*
	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.  The descriptors follow the 16-byte-aligned
	 * header.
	 */
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);

	/* Guard against a malformed header before dividing. */
	if (efihdr->descriptor_size == 0)
		return;
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type < nitems(types))
				type = types[p->md_type];
			else
				type = "<INVALID>";
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
				printf("UC ");
			if (p->md_attr & EFI_MD_ATTR_WC)
				printf("WC ");
			if (p->md_attr & EFI_MD_ATTR_WT)
				printf("WT ");
			if (p->md_attr & EFI_MD_ATTR_WB)
				printf("WB ");
			if (p->md_attr & EFI_MD_ATTR_UCE)
				printf("UCE ");
			if (p->md_attr & EFI_MD_ATTR_WP)
				printf("WP ");
			if (p->md_attr & EFI_MD_ATTR_RP)
				printf("RP ");
			if (p->md_attr & EFI_MD_ATTR_XP)
				printf("XP ");
			if (p->md_attr & EFI_MD_ATTR_NV)
				printf("NV ");
			if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
				printf("MORE_RELIABLE ");
			if (p->md_attr & EFI_MD_ATTR_RO)
				printf("RO ");
			if (p->md_attr & EFI_MD_ATTR_RT)
				printf("RUNTIME");
			printf("\n");
		}

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			/*
			 * We're allowed to use any entry with these types.
			 */
			break;
		default:
			continue;
		}

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idxp))
			break;
	}
}
904
#ifdef FDT
/*
 * Locate the device tree blob the loader recorded in the module metadata
 * and hand it to the Open Firmware FDT layer.  Panics if the OFW layer
 * cannot be installed or initialized; merely warns if no DTB was passed.
 */
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (!OF_install(OFW_FDT, 0))
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif
924
925 static bool
926 bus_probe(void)
927 {
928         bool has_acpi, has_fdt;
929         char *order, *env;
930
931         has_acpi = has_fdt = false;
932
933 #ifdef FDT
934         has_fdt = (OF_peer(0) != 0);
935 #endif
936 #ifdef DEV_ACPI
937         has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0);
938 #endif
939
940         env = kern_getenv("kern.cfg.order");
941         if (env != NULL) {
942                 order = env;
943                 while (order != NULL) {
944                         if (has_acpi &&
945                             strncmp(order, "acpi", 4) == 0 &&
946                             (order[4] == ',' || order[4] == '\0')) {
947                                 arm64_bus_method = ARM64_BUS_ACPI;
948                                 break;
949                         }
950                         if (has_fdt &&
951                             strncmp(order, "fdt", 3) == 0 &&
952                             (order[3] == ',' || order[3] == '\0')) {
953                                 arm64_bus_method = ARM64_BUS_FDT;
954                                 break;
955                         }
956                         order = strchr(order, ',');
957                 }
958                 freeenv(env);
959
960                 /* If we set the bus method it is valid */
961                 if (arm64_bus_method != ARM64_BUS_NONE)
962                         return (true);
963         }
964         /* If no order or an invalid order was set use the default */
965         if (arm64_bus_method == ARM64_BUS_NONE) {
966                 if (has_fdt)
967                         arm64_bus_method = ARM64_BUS_FDT;
968                 else if (has_acpi)
969                         arm64_bus_method = ARM64_BUS_ACPI;
970         }
971
972         /*
973          * If no option was set the default is valid, otherwise we are
974          * setting one to get cninit() working, then calling panic to tell
975          * the user about the invalid bus setup.
976          */
977         return (env == NULL);
978 }
979
/*
 * Read the cache-geometry registers (CTR_EL0, DCZID_EL0) and record the
 * minimum D-, I- and unified cache line sizes plus the "dc zva" block
 * size.  Selects the cache-assisted pagezero when "dc zva" is permitted.
 */
static void
cache_setup(void)
{
	int dcache_line_shift, icache_line_shift, dczva_line_shift;
	uint32_t ctr_el0;
	uint32_t dczid_el0;

	ctr_el0 = READ_SPECIALREG(ctr_el0);

	/* Read the log2 words in each D cache line */
	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
	/* Get the D cache line size: words are 4 bytes, hence sizeof(int). */
	dcache_line_size = sizeof(int) << dcache_line_shift;

	/* And the same for the I cache */
	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
	icache_line_size = sizeof(int) << icache_line_shift;

	idcache_line_size = MIN(dcache_line_size, icache_line_size);

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same as with above calculations */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
1014
/*
 * Machine-dependent early boot initialization, entered from locore with a
 * bootparams structure describing what the loader set up.  The ordering of
 * the steps below is deliberate: the pcpu pointer must be live before
 * pmap_bootstrap(), and the console must be up (cninit()) before we can
 * usefully panic about a bad bus configuration.
 */
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
	char *env;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	bool valid;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/*
	 * Find the kernel's preload metadata; try the legacy tag first,
	 * then the 64-bit one.  NOTE(review): kmdp is not checked for NULL
	 * before the MD_FETCH calls below — presumably the loader always
	 * provides one of these records; confirm before relying on it.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	/* Pull boot flags and the static environment from the loader. */
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/*
	 * Load the physical memory ranges: prefer the EFI memory map
	 * handed over by the loader, falling back to the device tree's
	 * memory nodes when built with FDT support.
	 */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr, physmap, &physmap_idx);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		add_fdt_mem_regions(mem_regions, mem_regions_sz, physmap,
		    &physmap_idx);
	}
#endif

	/*
	 * Copy the physical map into dump_avail (start/end address pairs,
	 * zero-terminated) and total up the memory present.  NOTE(review):
	 * mem_len is accumulated but not consumed anywhere visible in this
	 * function.
	 */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer in x18 with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap  to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);

	devmap_bootstrap(0, NULL);

	/*
	 * Pick the bus attachment (FDT vs. ACPI) before bringing up the
	 * console, but defer the panic for an invalid kern.cfg.order
	 * setting until after cninit() so the message is visible.
	 */
	valid = bus_probe();

	cninit();

	if (!valid)
		panic("Invalid bus configuration: %s",
		    kern_getenv("kern.cfg.order"));

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
	pan_enable();

	/* Honour a "kernelname" environment override, if set. */
	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	early_boot = 0;
}
1127
/*
 * Early setup of the self-hosted debug hardware; called from initarm().
 * The OS lock must be cleared first, before the monitor is configured.
 */
void
dbg_init(void)
{

	/* Clear OS lock (OSLAR_EL1) to allow debug register access. */
	WRITE_SPECIALREG(OSLAR_EL1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}
1140
1141 #ifdef DDB
1142 #include <ddb/ddb.h>
1143
/*
 * DDB "show specialregs" command: print every AArch64 system register
 * readable at EL1 that the toolchain's assembler accepts, one per line.
 * Registers llvm does not know, or that require extra setup (VFP), are
 * commented out below.
 */
DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
/* Read the named system register and print it as a zero-padded hex value. */
#define PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}
1218
1219 DB_SHOW_COMMAND(vtop, db_show_vtop)
1220 {
1221         uint64_t phys;
1222
1223         if (have_addr) {
1224                 phys = arm64_address_translate_s1e1r(addr);
1225                 db_printf("EL1 physical address reg (read):  0x%016lx\n", phys);
1226                 phys = arm64_address_translate_s1e1w(addr);
1227                 db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
1228                 phys = arm64_address_translate_s1e0r(addr);
1229                 db_printf("EL0 physical address reg (read):  0x%016lx\n", phys);
1230                 phys = arm64_address_translate_s1e0w(addr);
1231                 db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
1232         } else
1233                 db_printf("show vtop <virt_addr>\n");
1234 }
1235 #endif