/*-
 * Copyright (c) 2003,2004 Marcel Moolenaar
 * Copyright (c) 2000,2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/efi.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/intr.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/pal.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sal.h>
#include <machine/sigframe.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/unwind.h>
#include <machine/vmparam.h>

/*
 * For atomicity reasons, we demand that pc_curthread is the first
 * field in the struct pcpu. It allows us to read the pointer with
 * a single atomic instruction:
 *      ld8 %curthread = [r13]
 * Otherwise we would first have to calculate the load address,
 * store the result in a temporary register and use that for the
 * load:
 *      add %temp = %offsetof(struct pcpu), r13
 *      ld8 %curthread = [%temp]
 * A context switch in between the add and the ld8 could have the
 * thread migrate to a different core. In that case, %curthread
 * would be the thread running on the original core and not actually
 * the current thread.
 */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

static SYSCTL_NODE(_hw, OID_AUTO, freq, CTLFLAG_RD, 0, "");
static SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RD, 0, "");

static u_int bus_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, bus, CTLFLAG_RD, &bus_freq, 0,
    "Bus clock frequency");

static u_int cpu_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, cpu, CTLFLAG_RD, &cpu_freq, 0,
    "CPU clock frequency");

static u_int itc_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTLFLAG_RD, &itc_freq, 0,
    "ITC frequency");

int cold = 1;

struct bootinfo *bootinfo;

struct pcpu pcpu0;

extern u_int64_t kernel_text[], _end[];

extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

struct fpswa_iface *fpswa_iface;

vm_size_t ia64_pal_size;
vm_paddr_t ia64_pal_base;
vm_offset_t ia64_port_base;

u_int64_t ia64_lapic_addr = PAL_PIB_DEFAULT_ADDR;

struct ia64_pib *ia64_pib;

static int ia64_sync_icache_needed;

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
    "The CPU model name");

static char cpu_family[64];
SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
    "The CPU family name");

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

struct msgbuf *msgbufp = NULL;

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = NULL;

struct kva_md_info kmi;

#define Mhz     1000000L
#define Ghz     (1000L*Mhz)

static void
identifycpu(void)
{
        char vendor[17];
        char *family_name, *model_name;
        u_int64_t features, tmp;
        int number, revision, model, family, archrev;

        /*
         * Assumes little-endian.
         */
        *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
        *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
        vendor[16] = '\0';

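        /*
         * CPUID register 3 holds the version information; the shifts
         * below extract the number, revision, model, family and
         * archrev fields (8 bits each, starting at bit 0).
         */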
        tmp = ia64_get_cpuid(3);
        number = (tmp >> 0) & 0xff;
        revision = (tmp >> 8) & 0xff;
        model = (tmp >> 16) & 0xff;
        family = (tmp >> 24) & 0xff;
        archrev = (tmp >> 32) & 0xff;

        family_name = model_name = "unknown";
        switch (family) {
        case 0x07:
                family_name = "Itanium";
                model_name = "Merced";
                break;
        case 0x1f:
                family_name = "Itanium 2";
                switch (model) {
                case 0x00:
                        model_name = "McKinley";
                        break;
                case 0x01:
                        /*
                         * Deerfield is a low-voltage variant based on the
                         * Madison core. We need circumstantial evidence
                         * (i.e. the clock frequency) to identify those.
                         * Allow for roughly 1% error margin.
                         */
                        if (cpu_freq > 990 && cpu_freq < 1010)
                                model_name = "Deerfield";
                        else
                                model_name = "Madison";
                        break;
                case 0x02:
                        model_name = "Madison II";
                        break;
                }
                break;
        case 0x20:
                ia64_sync_icache_needed = 1;

                family_name = "Itanium 2";
                switch (model) {
                case 0x00:
                        model_name = "Montecito";
                        break;
                case 0x01:
                        model_name = "Montvale";
                        break;
                }
                break;
        }
        snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
        snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);

        features = ia64_get_cpuid(4);

        printf("CPU: %s (", model_name);
        if (cpu_freq)
                printf("%u MHz ", cpu_freq);
        printf("%s)\n", family_name);
        printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
        printf("  Features = 0x%b\n", (u_int32_t) features,
            "\020"
            "\001LB"    /* long branch (brl) instruction. */
            "\002SD"    /* Spontaneous deferral. */
            "\003AO"    /* 16-byte atomic operations (ld, st, cmpxchg). */ );
}

static void
cpu_startup(void *dummy)
{
        char nodename[16];
        struct pcpu *pc;
        struct pcpu_stats *pcs;

        /*
         * Good {morning,afternoon,evening,night}.
         */
        identifycpu();

#ifdef PERFMON
        perfmon_init();
#endif
        printf("real memory  = %ld (%ld MB)\n", ptoa(realmem),
            ptoa(realmem) / 1048576);

        vm_ksubmap_init(&kmi);

        printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
            ptoa(cnt.v_free_count) / 1048576);

        if (fpswa_iface == NULL)
                printf("Warning: no FPSWA package supplied\n");
        else
                printf("FPSWA Revision = 0x%lx, Entry = %p\n",
                    (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();
        vm_pager_bufferinit();

        /*
         * Traverse the MADT to discover IOSAPIC and Local SAPIC
         * information.
         */
        ia64_probe_sapics();
        ia64_pib = pmap_mapdev(ia64_lapic_addr, sizeof(*ia64_pib));

        ia64_mca_init();

        /*
         * Create sysctl tree for per-CPU information.
         */
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                snprintf(nodename, sizeof(nodename), "%u", pc->pc_cpuid);
                sysctl_ctx_init(&pc->pc_md.sysctl_ctx);
                pc->pc_md.sysctl_tree = SYSCTL_ADD_NODE(&pc->pc_md.sysctl_ctx,
                    SYSCTL_STATIC_CHILDREN(_machdep_cpu), OID_AUTO, nodename,
                    CTLFLAG_RD, NULL, "");
                if (pc->pc_md.sysctl_tree == NULL)
                        continue;

                pcs = &pc->pc_md.stats;

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nasts", CTLFLAG_RD, &pcs->pcs_nasts,
                    "Number of IPI_AST interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nclks", CTLFLAG_RD, &pcs->pcs_nclks,
                    "Number of clock interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nextints", CTLFLAG_RD, &pcs->pcs_nextints,
                    "Number of ExtINT interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nhardclocks", CTLFLAG_RD, &pcs->pcs_nhardclocks,
                    "Number of IPI_HARDCLOCK interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nhighfps", CTLFLAG_RD, &pcs->pcs_nhighfps,
                    "Number of IPI_HIGH_FP interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nhwints", CTLFLAG_RD, &pcs->pcs_nhwints,
                    "Number of hardware (device) interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "npreempts", CTLFLAG_RD, &pcs->pcs_npreempts,
                    "Number of IPI_PREEMPT interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nrdvs", CTLFLAG_RD, &pcs->pcs_nrdvs,
                    "Number of IPI_RENDEZVOUS interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nstops", CTLFLAG_RD, &pcs->pcs_nstops,
                    "Number of IPI_STOP interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nstrays", CTLFLAG_RD, &pcs->pcs_nstrays,
                    "Number of stray interrupts");
        }
}
SYSINIT(cpu_startup, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

void
cpu_flush_dcache(void *ptr, size_t len)
{
        vm_offset_t lim, va;

        va = (uintptr_t)ptr & ~31;
        lim = (uintptr_t)ptr + len;
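        /*
         * fc flushes one cache line per invocation. The 32-byte
         * stride assumes 32 bytes is the smallest line size flushed
         * by fc on the Itanium models this code runs on, so no line
         * in [ptr, ptr + len) is skipped.
         */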
        while (va < lim) {
                ia64_fc(va);
                va += 32;
        }

        ia64_srlz_d();
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

        if (pcpu_find(cpu_id) == NULL || rate == NULL)
                return (EINVAL);
        *rate = (u_long)cpu_freq * 1000000ul;
        return (0);
}

void
cpu_halt(void)
{

        efi_reset_system();
}

void
cpu_idle(int busy)
{
        register_t ie;

        if (!busy) {
                critical_enter();
                cpu_idleclock();
        }

        ie = intr_disable();
        KASSERT(ie != 0, ("%s called with interrupts disabled\n", __func__));

        if (sched_runnable())
                ia64_enable_intr();
        else if (cpu_idle_hook != NULL) {
                (*cpu_idle_hook)();
                /* The hook must enable interrupts! */
        } else {
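                /*
                 * No idle hook: ask PAL to put the CPU in a
                 * low-power halt until the next interrupt.
                 */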
                ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
                ia64_enable_intr();
        }

        if (!busy) {
                cpu_activeclock();
                critical_exit();
        }
}

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

void
cpu_reset(void)
{

        efi_reset_system();
}

void
cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
{
        struct pcb *oldpcb, *newpcb;

        oldpcb = old->td_pcb;
#ifdef COMPAT_FREEBSD32
        ia32_savectx(oldpcb);
#endif
        if (PCPU_GET(fpcurthread) == old)
                old->td_frame->tf_special.psr |= IA64_PSR_DFH;
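        /*
         * savectx() is setjmp-like: it returns 0 after saving the
         * context and non-zero when the context is resumed through
         * restorectx() below.
         */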
        if (!savectx(oldpcb)) {
                newpcb = new->td_pcb;
                oldpcb->pcb_current_pmap =
                    pmap_switch(newpcb->pcb_current_pmap);

                atomic_store_rel_ptr(&old->td_lock, mtx);

#if defined(SCHED_ULE) && defined(SMP)
                while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
                        cpu_spinwait();
#endif

                PCPU_SET(curthread, new);

#ifdef COMPAT_FREEBSD32
                ia32_restorectx(newpcb);
#endif

                if (PCPU_GET(fpcurthread) == new)
                        new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
                restorectx(newpcb);
                /* We should not get here. */
                panic("cpu_switch: restorectx() returned");
                /* NOTREACHED */
        }
}

void
cpu_throw(struct thread *old __unused, struct thread *new)
{
        struct pcb *newpcb;

        newpcb = new->td_pcb;
        (void)pmap_switch(newpcb->pcb_current_pmap);

#if defined(SCHED_ULE) && defined(SMP)
        while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
                cpu_spinwait();
#endif

        PCPU_SET(curthread, new);

#ifdef COMPAT_FREEBSD32
        ia32_restorectx(newpcb);
#endif

        restorectx(newpcb);
        /* We should not get here. */
        panic("cpu_throw: restorectx() returned");
        /* NOTREACHED */
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        /*
         * Set pc_acpi_id to "uninitialized".
         * See sys/dev/acpica/acpi_cpu.c
         */
        pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
        struct thread *td;
        int intr;

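        /*
         * On the first (outermost) acquisition, disable interrupts
         * and remember their previous state; nested acquisitions
         * only bump the count.
         */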
        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                intr = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_intr = intr;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        int intr;

        td = curthread;
        critical_exit();
        intr = td->td_md.md_saved_intr;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(intr);
}

void
map_vhpt(uintptr_t vhpt)
{
        pt_entry_t pte;
        uint64_t psr;

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RW;
        pte |= vhpt & PTE_PPN_MASK;

        __asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
            "r"(pmap_vhpt_log2size << 2));

        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(vhpt);
        ia64_set_itir(pmap_vhpt_log2size << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        ia64_srlz_i();
}

void
map_pal_code(void)
{
        pt_entry_t pte;
        vm_offset_t va;
        vm_size_t sz;
        uint64_t psr;
        u_int shft;

        if (ia64_pal_size == 0)
                return;

        va = IA64_PHYS_TO_RR7(ia64_pal_base);

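        /*
         * Compute log2 of the PAL size (assuming a power-of-two
         * size); this becomes the page size of the translation so
         * a single TR covers all of PAL.
         */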
        sz = ia64_pal_size;
        shft = 0;
        while (sz > 1) {
                shft++;
                sz >>= 1;
        }

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RWX;
        pte |= ia64_pal_base & PTE_PPN_MASK;

        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: "r"(va), "r"(shft<<2));

        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(va);
        ia64_set_itir(shft << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(4), "r"(pte));
        ia64_srlz_d();
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        ia64_srlz_i();
}

void
map_gateway_page(void)
{
        pt_entry_t pte;
        uint64_t psr;

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_X_RX;
        pte |= ia64_tpa((uint64_t)ia64_gateway_page) & PTE_PPN_MASK;

        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(VM_MAXUSER_ADDRESS), "r"(PAGE_SHIFT << 2));

        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(VM_MAXUSER_ADDRESS);
        ia64_set_itir(PAGE_SHIFT << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(5), "r"(pte));
        ia64_srlz_d();
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(2), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        ia64_srlz_i();

        /* Expose the mapping to userland in ar.k5 */
        ia64_set_k5(VM_MAXUSER_ADDRESS);
}

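/*
 * Convert a SAL base frequency and a PAL ratio (numerator in the
 * upper 32 bits, denominator in the lower 32 bits) into a frequency
 * in MHz, rounded to the nearest unit.
 */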
static u_int
freq_ratio(u_long base, u_long ratio)
{
        u_long f;

        f = (base * (ratio >> 32)) / (ratio & 0xfffffffful);
        return ((f + 500000) / 1000000);
}

static void
calculate_frequencies(void)
{
        struct ia64_sal_result sal;
        struct ia64_pal_result pal;
        register_t ie;

        ie = intr_disable();
        sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
        pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
        intr_restore(ie);

        if (sal.sal_status == 0 && pal.pal_status == 0) {
                if (bootverbose) {
                        printf("Platform clock frequency %ld Hz\n",
                               sal.sal_result[0]);
                        printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
                               "ITC ratio %ld/%ld\n",
                               pal.pal_result[0] >> 32,
                               pal.pal_result[0] & ((1L << 32) - 1),
                               pal.pal_result[1] >> 32,
                               pal.pal_result[1] & ((1L << 32) - 1),
                               pal.pal_result[2] >> 32,
                               pal.pal_result[2] & ((1L << 32) - 1));
                }
                cpu_freq = freq_ratio(sal.sal_result[0], pal.pal_result[0]);
                bus_freq = freq_ratio(sal.sal_result[0], pal.pal_result[1]);
                itc_freq = freq_ratio(sal.sal_result[0], pal.pal_result[2]);
        }
}

struct ia64_init_return
ia64_init(void)
{
        struct ia64_init_return ret;
        struct efi_md *md;
        pt_entry_t *pbvm_pgtbl_ent, *pbvm_pgtbl_lim;
        char *p;
        vm_size_t mdlen;
        int metadata_missing;

        /*
         * NO OUTPUT ALLOWED UNTIL FURTHER NOTICE.
         */

        ia64_set_fpsr(IA64_FPSR_DEFAULT);

        /*
         * Region 6 is direct mapped UC and region 7 is direct mapped
         * WB. The details of this are controlled by the Alt {I,D}TLB
         * handlers. Here we just make sure that they have the largest
         * possible page size to minimise TLB usage.
         */
        ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (PAGE_SHIFT << 2));
        ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (PAGE_SHIFT << 2));
        ia64_srlz_d();

        /* Initialize/setup physical memory datastructures */
        ia64_physmem_init();

        /*
         * Process the memory map. This gives us the PAL locations,
         * the I/O port base address, and the available memory regions
         * for initializing the physical memory map.
         */
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
                mdlen = md->md_pages * EFI_PAGE_SIZE;
                switch (md->md_type) {
                case EFI_MD_TYPE_IOPORT:
                        ia64_port_base = (uintptr_t)pmap_mapdev(md->md_phys,
                            mdlen);
                        break;
                case EFI_MD_TYPE_PALCODE:
                        ia64_pal_base = md->md_phys;
                        ia64_pal_size = mdlen;
                        /*FALLTHROUGH*/
                case EFI_MD_TYPE_BAD:
                case EFI_MD_TYPE_FIRMWARE:
                case EFI_MD_TYPE_RECLAIM:
                case EFI_MD_TYPE_RT_CODE:
                case EFI_MD_TYPE_RT_DATA:
                        /* Don't use these memory regions. */
                        ia64_physmem_track(md->md_phys, mdlen);
                        break;
                case EFI_MD_TYPE_BS_CODE:
                case EFI_MD_TYPE_BS_DATA:
                case EFI_MD_TYPE_CODE:
                case EFI_MD_TYPE_DATA:
                case EFI_MD_TYPE_FREE:
                        /* These are ok to use. */
                        ia64_physmem_add(md->md_phys, mdlen);
                        break;
                }
        }

        /*
         * Remove the PBVM and its page table from phys_avail. The loader
         * passes the physical address of the page table to us. The virtual
         * address of the page table is fixed.
         * Track the PBVM limit for later use.
         */
        ia64_physmem_delete(bootinfo->bi_pbvm_pgtbl, bootinfo->bi_pbvm_pgtblsz);
        pbvm_pgtbl_ent = (void *)IA64_PBVM_PGTBL;
        pbvm_pgtbl_lim = (void *)(IA64_PBVM_PGTBL + bootinfo->bi_pbvm_pgtblsz);
        while (pbvm_pgtbl_ent < pbvm_pgtbl_lim) {
                if ((*pbvm_pgtbl_ent & PTE_PRESENT) == 0)
                        break;
                ia64_physmem_delete(*pbvm_pgtbl_ent & PTE_PPN_MASK,
                    IA64_PBVM_PAGE_SIZE);
                pbvm_pgtbl_ent++;
        }

        /* Finalize physical memory datastructures */
        ia64_physmem_fini();

        metadata_missing = 0;
        if (bootinfo->bi_modulep)
                preload_metadata = (caddr_t)bootinfo->bi_modulep;
        else
                metadata_missing = 1;

        if (envmode == 0 && bootinfo->bi_envp)
                kern_envp = (caddr_t)bootinfo->bi_envp;
        else
                kern_envp = static_env;

        /*
         * Look at arguments passed to us and compute boothowto.
         */
        boothowto = bootinfo->bi_boothowto;

        if (boothowto & RB_VERBOSE)
                bootverbose = 1;

        /*
         * Wire things up so we can call the firmware.
         */
        map_pal_code();
        efi_boot_minimal(bootinfo->bi_systab);
        ia64_xiv_init();
        ia64_sal_init();
        calculate_frequencies();

        set_cputicker(ia64_get_itc, (u_long)itc_freq * 1000000, 0);

        /*
         * Setup the PCPU data for the bootstrap processor. It is needed
         * by printf(). Also, since printf() has critical sections, we
         * need to initialize at least pc_curthread.
         */
        pcpup = &pcpu0;
        ia64_set_k4((u_int64_t)pcpup);
        pcpu_init(pcpup, 0, sizeof(pcpu0));
        dpcpu_init(ia64_physmem_alloc(DPCPU_SIZE, PAGE_SIZE), 0);
        PCPU_SET(md.lid, ia64_get_lid());
        PCPU_SET(curthread, &thread0);

        /*
         * Initialize the console before we print anything out.
         */
        cninit();

        /* OUTPUT NOW ALLOWED */

        if (metadata_missing)
                printf("WARNING: loader(8) metadata is missing!\n");

        /* Get FPSWA interface */
        fpswa_iface = (bootinfo->bi_fpswa == 0) ? NULL :
            (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo->bi_fpswa);

        /* Init basic tunables, including hz */
        init_param1();

        p = getenv("kernelname");
        if (p != NULL) {
                strlcpy(kernelname, p, sizeof(kernelname));
                freeenv(p);
        }

        init_param2(physmem);

        /*
         * Initialize error message buffer (at end of core).
         */
        msgbufp = ia64_physmem_alloc(msgbufsize, PAGE_SIZE);
        msgbufinit(msgbufp, msgbufsize);

        proc_linkup0(&proc0, &thread0);
        /*
         * Init mapping for kernel stack for proc 0
         */
        p = ia64_physmem_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
        thread0.td_kstack = (uintptr_t)p;
        thread0.td_kstack_pages = KSTACK_PAGES;

        mutex_init();

        /*
         * Initialize the rest of proc 0's PCB.
         *
         * Set the kernel sp, reserving space for an (empty) trapframe,
         * and make proc0's trapframe pointer point to it for sanity.
         * Initialise proc0's backing store to start after u area.
         */
        cpu_thread_alloc(&thread0);
        thread0.td_frame->tf_flags = FRAME_SYSCALL;
        thread0.td_pcb->pcb_special.sp =
            (u_int64_t)thread0.td_frame - 16;
        thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;

        /*
         * Initialize the virtual memory system.
         */
        pmap_bootstrap();

        /*
         * Initialize debuggers, and break into them if appropriate.
         */
#ifdef DDB
        ksym_start = bootinfo->bi_symtab;
        ksym_end = bootinfo->bi_esymtab;
#endif

        kdb_init();

#ifdef KDB
        if (boothowto & RB_KDB)
                kdb_enter(KDB_WHY_BOOTFLAGS,
                    "Boot flags requested debugger\n");
#endif

        ia64_set_tpr(0);
        ia64_srlz_d();

        ret.bspstore = thread0.td_pcb->pcb_special.bspstore;
        ret.sp = thread0.td_pcb->pcb_special.sp;
        return (ret);
}

uint64_t
ia64_get_hcdp(void)
{

        return (bootinfo->bi_hcdp);
}

void
bzero(void *buf, size_t len)
{
        caddr_t p = buf;

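        /*
         * Byte-align to a word boundary, clear eight words per
         * iteration, then mop up the word-sized and byte-sized
         * remainders.
         */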
        while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
                *p++ = 0;
                len--;
        }
        while (len >= sizeof(u_long) * 8) {
                *(u_long*) p = 0;
                *((u_long*) p + 1) = 0;
                *((u_long*) p + 2) = 0;
                *((u_long*) p + 3) = 0;
                len -= sizeof(u_long) * 8;
                *((u_long*) p + 4) = 0;
                *((u_long*) p + 5) = 0;
                *((u_long*) p + 6) = 0;
                *((u_long*) p + 7) = 0;
                p += sizeof(u_long) * 8;
        }
        while (len >= sizeof(u_long)) {
                *(u_long*) p = 0;
                len -= sizeof(u_long);
                p += sizeof(u_long);
        }
        while (len) {
                *p++ = 0;
                len--;
        }
}

u_int
ia64_itc_freq(void)
{

        return (itc_freq);
}

void
DELAY(int n)
{
        u_int64_t start, end, now;

        sched_pin();

        start = ia64_get_itc();
        end = start + itc_freq * n;
        /* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
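        /*
         * Spin until the ITC passes 'end'. The second clause keeps
         * us spinning in the case where 'end' wrapped around zero
         * but 'now' has not wrapped yet.
         */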
        do {
                now = ia64_get_itc();
        } while (now < end || (now > start && end < start));

        sched_unpin();
}

/*
 * Send an interrupt (signal) to a process.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct proc *p;
        struct thread *td;
        struct trapframe *tf;
        struct sigacts *psp;
        struct sigframe sf, *sfp;
        u_int64_t sbs, sp;
        int oonstack;
        int sig;
        u_long code;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);
        tf = td->td_frame;
        sp = tf->tf_special.sp;
        oonstack = sigonstack(sp);
        sbs = 0;

        /* save user context */
        bzero(&sf, sizeof(struct sigframe));
        sf.sf_uc.uc_sigmask = *mask;
        sf.sf_uc.uc_stack = td->td_sigstk;
        sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
            ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

        /*
         * Allocate and validate space for the signal handler
         * context. Note that if the stack is in P0 space, the
         * call to grow() is a nop, and the useracc() check
         * will fail if the process has not already allocated
         * the space with a `brk'.
         */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                sbs = (u_int64_t)td->td_sigstk.ss_sp;
                sbs = (sbs + 15) & ~15;
                sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else
                sfp = (struct sigframe *)sp;
        sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);

        /* Fill in the siginfo structure for POSIX handlers. */
        if (SIGISMEMBER(psp->ps_siginfo, sig)) {
                sf.sf_si = ksi->ksi_info;
                sf.sf_si.si_signo = sig;
                /*
                 * XXX this shouldn't be here after code in trap.c
                 * is fixed
                 */
                sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
                code = (u_int64_t)&sfp->sf_si;
        }

        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(p);

        get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);

        /* Copy the frame out to userland. */
        if (copyout(&sf, sfp, sizeof(sf)) != 0) {
                /*
                 * Process has trashed its stack; give it an illegal
                 * instruction to halt it in its tracks.
                 */
                PROC_LOCK(p);
                sigexit(td, SIGILL);
                return;
        }

        if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
                tf->tf_special.psr &= ~IA64_PSR_RI;
                tf->tf_special.iip = ia64_get_k5() +
                    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
        } else
                tf->tf_special.iip = ia64_get_k5() +
                    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);

        /*
         * Setup the trapframe to return to the signal trampoline. We pass
         * information to the trampoline in the following registers:
         *
         *      gp      new backing store or NULL
         *      r8      signal number
         *      r9      signal code or siginfo pointer
         *      r10     signal handler (function descriptor)
         */
        tf->tf_special.sp = (u_int64_t)sfp - 16;
        tf->tf_special.gp = sbs;
        tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
        tf->tf_special.ndirty = 0;
        tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
        tf->tf_scratch.gr8 = sig;
        tf->tf_scratch.gr9 = code;
        tf->tf_scratch.gr10 = (u_int64_t)catcher;

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psr as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td,
        struct sigreturn_args /* {
                ucontext_t *sigcntxp;
        } */ *uap)
{
        ucontext_t uc;
        struct trapframe *tf;
        struct pcb *pcb;

        tf = td->td_frame;
        pcb = td->td_pcb;

        /*
         * Fetch the entire context structure at once for speed.
         * We don't use a normal argument to simplify RSE handling.
         */
        if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
                return (EFAULT);

        set_mcontext(td, &uc.uc_mcontext);

#if defined(COMPAT_43)
        if (sigonstack(tf->tf_special.sp))
                td->td_sigstk.ss_flags |= SS_ONSTACK;
        else
                td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

        return sys_sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

        pcb->pcb_special = tf->tf_special;
        pcb->pcb_special.__spare = ~0UL;        /* XXX see unwind.c */
        save_callee_saved(&pcb->pcb_preserved);
        save_callee_saved_fp(&pcb->pcb_preserved_fp);
}

int
ia64_flush_dirty(struct thread *td, struct _special *r)
{
        struct iovec iov;
        struct uio uio;
        uint64_t bspst, kstk, rnat;
        int error, locked;

        if (r->ndirty == 0)
                return (0);

        kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
        if (td == curthread) {
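                /*
                 * Stop the RSE (ar.rsc=0) so that ar.bspstore and
                 * ar.rnat can be read consistently; flushrs forces
                 * any remaining dirty registers to the backing store
                 * before we copy them out. ar.rsc=3 resumes eager
                 * mode afterwards.
                 */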
                __asm __volatile("mov   ar.rsc=0;;");
                __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
                /* Make sure we have all the user registers written out. */
                if (bspst - kstk < r->ndirty) {
                        __asm __volatile("flushrs;;");
                        __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
                }
                __asm __volatile("mov   %0=ar.rnat;;" : "=r"(rnat));
                __asm __volatile("mov   ar.rsc=3");
                error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
                kstk += r->ndirty;
                r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
                    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
        } else {
                locked = PROC_LOCKED(td->td_proc);
                if (!locked)
                        PHOLD(td->td_proc);
                iov.iov_base = (void*)(uintptr_t)kstk;
                iov.iov_len = r->ndirty;
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_offset = r->bspstore;
                uio.uio_resid = r->ndirty;
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_rw = UIO_WRITE;
                uio.uio_td = td;
                error = proc_rwmem(td->td_proc, &uio);
                /*
                 * XXX proc_rwmem() doesn't currently return ENOSPC,
                 * so I think it can bogusly return 0. Nor do we
                 * allow short writes.
                 */
                if (uio.uio_resid != 0 && error == 0)
                        error = ENOSPC;
                if (!locked)
                        PRELE(td->td_proc);
        }

        r->bspstore += r->ndirty;
        r->ndirty = 0;
        return (error);
}

int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
        struct trapframe *tf;
        int error;

        tf = td->td_frame;
        bzero(mc, sizeof(*mc));
        mc->mc_special = tf->tf_special;
        error = ia64_flush_dirty(td, &mc->mc_special);
        if (tf->tf_flags & FRAME_SYSCALL) {
                mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
                mc->mc_scratch = tf->tf_scratch;
                if (flags & GET_MC_CLEAR_RET) {
                        mc->mc_scratch.gr8 = 0;
                        mc->mc_scratch.gr9 = 0;
                        mc->mc_scratch.gr10 = 0;
                        mc->mc_scratch.gr11 = 0;
                }
        } else {
                mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
                mc->mc_scratch = tf->tf_scratch;
                mc->mc_scratch_fp = tf->tf_scratch_fp;
                /*
                 * XXX If the thread never used the high FP registers, we
                 * probably shouldn't waste time saving them.
                 */
                ia64_highfp_save(td);
                mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
                mc->mc_high_fp = td->td_pcb->pcb_high_fp;
        }
        save_callee_saved(&mc->mc_preserved);
        save_callee_saved_fp(&mc->mc_preserved_fp);
        return (error);
}

int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
        struct _special s;
        struct trapframe *tf;
        uint64_t psrmask;

        tf = td->td_frame;

        KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
            ("Whoa there! We have more than 8KB of dirty registers!"));

        s = mc->mc_special;
        /*
         * Only copy the user mask and the restart instruction bit from
         * the new context.
         */
        psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
            IA64_PSR_MFH | IA64_PSR_RI;
        s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
        /* We don't have any dirty registers of the new context. */
        s.ndirty = 0;
        if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
                /*
                 * We can get an async context passed to us while we
                 * entered the kernel through a syscall: sigreturn(2)
                 * takes contexts that could previously be the result of
                 * a trap or interrupt.
                 * Hence, we cannot assert that the trapframe is not
                 * a syscall frame, but we can assert that it's at
                 * least an expected syscall.
                 */
                if (tf->tf_flags & FRAME_SYSCALL) {
                        KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn, ("foo"));
                        tf->tf_flags &= ~FRAME_SYSCALL;
                }
                tf->tf_scratch = mc->mc_scratch;
                tf->tf_scratch_fp = mc->mc_scratch_fp;
                if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
                        td->td_pcb->pcb_high_fp = mc->mc_high_fp;
        } else {
                KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
                if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
                        s.cfm = tf->tf_special.cfm;
                        s.iip = tf->tf_special.iip;
                        tf->tf_scratch.gr15 = 0;        /* Clear syscall nr. */
                } else
                        tf->tf_scratch = mc->mc_scratch;
        }
        tf->tf_special = s;
        restore_callee_saved(&mc->mc_preserved);
        restore_callee_saved_fp(&mc->mc_preserved_fp);

        return (0);
}

/*
 * Clear registers on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf;
        uint64_t *ksttop, *kst;

        tf = td->td_frame;
        ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
            (tf->tf_special.bspstore & 0x1ffUL));

        /*
         * We can ignore up to 8KB of dirty registers by masking off the
         * lower 13 bits in exception_restore() or epc_syscall(). This
         * should be enough for a couple of years, but if there are more
         * than 8KB of dirty registers, we lose track of the bottom of
         * the kernel stack. The solution is to copy the active part of
         * the kernel stack down 1 page (or 2, but not more than that)
         * so that we always have less than 8KB of dirty registers.
         */
        KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
            ("Whoa there! We have more than 8KB of dirty registers!"));

        bzero(&tf->tf_special, sizeof(tf->tf_special));
        if ((tf->tf_flags & FRAME_SYSCALL) == 0) {      /* break syscalls. */
                bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
                bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
                tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
                tf->tf_special.bspstore = IA64_BACKINGSTORE;
                /*
                 * Copy the arguments onto the kernel register stack so that
                 * they get loaded by the loadrs instruction. Skip over the
                 * NaT collection points.
                 */
                kst = ksttop - 1;
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst-- = 0;
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst-- = imgp->ps_strings;
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst = stack;
                tf->tf_special.ndirty = (ksttop - kst) << 3;
        } else {                                /* epc syscalls (default). */
                tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
                tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
                /*
                 * Write values for out0, out1 and out2 to the user's backing
                 * store and arrange for them to be restored into the user's
                 * initial register frame.
                 * Assumes that (bspstore & 0x1f8) < 0x1e0.
                 */
                suword((caddr_t)tf->tf_special.bspstore - 24, stack);
                suword((caddr_t)tf->tf_special.bspstore - 16, imgp->ps_strings);
                suword((caddr_t)tf->tf_special.bspstore -  8, 0);
        }

        tf->tf_special.iip = imgp->entry_addr;
        tf->tf_special.sp = (stack & ~15) - 16;
        tf->tf_special.rsc = 0xf;
        tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
        tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
            IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
            IA64_PSR_CPL_USER;
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
        uint64_t slot;

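        /*
         * IA-64 bundles are 16 bytes and hold three instruction
         * slots; by convention the low 4 bits of the address select
         * the slot within the bundle.
         */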
        switch (addr & 0xFUL) {
        case 0:
                slot = IA64_PSR_RI_0;
                break;
        case 1:
                /* XXX we need to deal with MLX bundles here */
                slot = IA64_PSR_RI_1;
                break;
        case 2:
                slot = IA64_PSR_RI_2;
                break;
        default:
                return (EINVAL);
        }

        td->td_frame->tf_special.iip = addr & ~0x0FULL;
        td->td_frame->tf_special.psr =
            (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
        return (0);
}

int
ptrace_single_step(struct thread *td)
{
        struct trapframe *tf;

        /*
         * There's no way to set single stepping when we're leaving the
         * kernel through the EPC syscall path. The way we solve this is
         * by enabling the lower-privilege trap so that we re-enter the
         * kernel as soon as the privilege level changes. See trap.c for
         * how we proceed from there.
         */
        tf = td->td_frame;
        if (tf->tf_flags & FRAME_SYSCALL)
                tf->tf_special.psr |= IA64_PSR_LP;
        else
                tf->tf_special.psr |= IA64_PSR_SS;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
        struct trapframe *tf;

        /*
         * Clear any and all status bits we may use to implement single
         * stepping.
         */
        tf = td->td_frame;
        tf->tf_special.psr &= ~IA64_PSR_SS;
        tf->tf_special.psr &= ~IA64_PSR_LP;
        tf->tf_special.psr &= ~IA64_PSR_TB;
        return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *tf;

        tf = td->td_frame;
        regs->r_special = tf->tf_special;
        regs->r_scratch = tf->tf_scratch;
        save_callee_saved(&regs->r_preserved);
        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *tf;
        int error;

        tf = td->td_frame;
        error = ia64_flush_dirty(td, &tf->tf_special);
        if (!error) {
                tf->tf_special = regs->r_special;
                tf->tf_special.bspstore += tf->tf_special.ndirty;
                tf->tf_special.ndirty = 0;
                tf->tf_scratch = regs->r_scratch;
                restore_callee_saved(&regs->r_preserved);
        }
        return (error);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

        return (ENOSYS);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

        return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
        struct trapframe *frame = td->td_frame;
        struct pcb *pcb = td->td_pcb;

        /* Save the high FP registers. */
        ia64_highfp_save(td);

        fpregs->fpr_scratch = frame->tf_scratch_fp;
        save_callee_saved_fp(&fpregs->fpr_preserved);
        fpregs->fpr_high = pcb->pcb_high_fp;
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
        struct trapframe *frame = td->td_frame;
        struct pcb *pcb = td->td_pcb;

        /* Throw away the high FP registers (should be redundant). */
        ia64_highfp_drop(td);

        frame->tf_scratch_fp = fpregs->fpr_scratch;
        restore_callee_saved_fp(&fpregs->fpr_preserved);
        pcb->pcb_high_fp = fpregs->fpr_high;
        return (0);
}

void
ia64_sync_icache(vm_offset_t va, vm_offset_t sz)
{
        vm_offset_t lim;

        if (!ia64_sync_icache_needed)
                return;

        lim = va + sz;
        while (va < lim) {
                ia64_fc_i(va);
                va += 32;       /* XXX */
        }

        ia64_sync_i();
        ia64_srlz_i();
}