/*-
 * Copyright (c) 2003,2004 Marcel Moolenaar
 * Copyright (c) 2000,2001 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/efi.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/intr.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/pal.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sal.h>
#include <machine/sigframe.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/unwind.h>
#include <machine/vmparam.h>

/*
 * For atomicity reasons, we demand that pc_curthread is the first
 * field in the struct pcpu. It allows us to read the pointer with
 * a single atomic instruction:
 *      ld8 %curthread = [r13]
 * Otherwise we would first have to calculate the load address, store
 * the result in a temporary register and use that for the load:
 *      add %temp = %offsetof(struct pcpu), r13
 *      ld8 %curthread = [%temp]
 * A context switch in between the add and the ld8 could have the
 * thread migrate to a different core. In that case, %curthread would
 * be the thread running on the original core and not actually the
 * current thread.
 */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

static SYSCTL_NODE(_hw, OID_AUTO, freq, CTLFLAG_RD, 0, "");
static SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RD, 0, "");

static u_int bus_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, bus, CTLFLAG_RD, &bus_freq, 0,
    "Bus clock frequency");

static u_int cpu_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, cpu, CTLFLAG_RD, &cpu_freq, 0,
    "CPU clock frequency");

static u_int itc_freq;
SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTLFLAG_RD, &itc_freq, 0,
    "ITC frequency");

int cold = 1;

struct bootinfo *bootinfo;

struct pcpu pcpu0;

extern u_int64_t kernel_text[], _end[];

extern u_int64_t ia64_gateway_page[];
extern u_int64_t break_sigtramp[];
extern u_int64_t epc_sigtramp[];

struct fpswa_iface *fpswa_iface;

vm_size_t ia64_pal_size;
vm_paddr_t ia64_pal_base;
vm_offset_t ia64_port_base;

u_int64_t ia64_lapic_addr = PAL_PIB_DEFAULT_ADDR;

struct ia64_pib *ia64_pib;

static int ia64_sync_icache_needed;

char machine[] = MACHINE;
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0,
    "The CPU model name");

static char cpu_family[64];
SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0,
    "The CPU family name");

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

struct msgbuf *msgbufp = NULL;

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = NULL;

long Maxmem = 0;
long realmem = 0;

#define PHYSMAP_SIZE    (2 * VM_PHYSSEG_MAX)

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];

/* Must be 2 less so that a pair of zeroes can signal the end of the chunks. */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
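
/*
 * Illustrative layout (a sketch, not part of the build): phys_avail[]
 * holds {start, end} physical address pairs and is terminated by a
 * pair of zeroes, e.g. for two free chunks:
 *
 *      phys_avail[] = { start0, end0, start1, end1, 0, 0 };
 */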

struct kva_md_info kmi;

#define Mhz     1000000L
#define Ghz     (1000L*Mhz)

static void
identifycpu(void)
{
        char vendor[17];
        char *family_name, *model_name;
        u_int64_t features, tmp;
        int number, revision, model, family, archrev;

        /*
         * Assumes little-endian.
         */
        *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0);
        *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1);
        vendor[16] = '\0';

        tmp = ia64_get_cpuid(3);
        number = (tmp >> 0) & 0xff;
        revision = (tmp >> 8) & 0xff;
        model = (tmp >> 16) & 0xff;
        family = (tmp >> 24) & 0xff;
        archrev = (tmp >> 32) & 0xff;

        family_name = model_name = "unknown";
        switch (family) {
        case 0x07:
                family_name = "Itanium";
                model_name = "Merced";
                break;
        case 0x1f:
                family_name = "Itanium 2";
                switch (model) {
                case 0x00:
                        model_name = "McKinley";
                        break;
                case 0x01:
                        /*
                         * Deerfield is a low-voltage variant based on the
                         * Madison core. We need circumstantial evidence
                         * (i.e. the clock frequency) to identify those.
                         * Allow for roughly 1% error margin.
                         */
                        if (cpu_freq > 990 && cpu_freq < 1010)
                                model_name = "Deerfield";
                        else
                                model_name = "Madison";
                        break;
                case 0x02:
                        model_name = "Madison II";
                        break;
                }
                break;
        case 0x20:
                ia64_sync_icache_needed = 1;

                family_name = "Itanium 2";
                switch (model) {
                case 0x00:
                        model_name = "Montecito";
                        break;
                case 0x01:
                        model_name = "Montvale";
                        break;
                }
                break;
        }
        snprintf(cpu_family, sizeof(cpu_family), "%s", family_name);
        snprintf(cpu_model, sizeof(cpu_model), "%s", model_name);

        features = ia64_get_cpuid(4);

        printf("CPU: %s (", model_name);
        if (cpu_freq)
                printf("%u MHz ", cpu_freq);
        printf("%s)\n", family_name);
        printf("  Origin = \"%s\"  Revision = %d\n", vendor, revision);
        printf("  Features = 0x%b\n", (u_int32_t) features,
            "\020"
            "\001LB"    /* long branch (brl) instruction. */
            "\002SD"    /* Spontaneous deferral. */
            "\003AO"    /* 16-byte atomic operations (ld, st, cmpxchg). */ );
}

static void
cpu_startup(void *dummy)
{
        char nodename[16];
        struct pcpu *pc;
        struct pcpu_stats *pcs;

        /*
         * Good {morning,afternoon,evening,night}.
         */
        identifycpu();

#ifdef PERFMON
        perfmon_init();
#endif
        printf("real memory  = %ld (%ld MB)\n", ia64_ptob(Maxmem),
            ia64_ptob(Maxmem) / 1048576);
        realmem = Maxmem;

        /*
         * Display any holes after the first chunk of extended memory.
         */
        if (bootverbose) {
                int indx;

                printf("Physical memory chunk(s):\n");
                for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
                        long size1 = phys_avail[indx + 1] - phys_avail[indx];

                        printf("0x%08lx - 0x%08lx, %ld bytes (%ld pages)\n",
                            phys_avail[indx], phys_avail[indx + 1] - 1, size1,
                            size1 >> PAGE_SHIFT);
                }
        }

        vm_ksubmap_init(&kmi);

        printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
            ptoa(cnt.v_free_count) / 1048576);

        if (fpswa_iface == NULL)
                printf("Warning: no FPSWA package supplied\n");
        else
                printf("FPSWA Revision = 0x%lx, Entry = %p\n",
                    (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();
        vm_pager_bufferinit();

        /*
         * Traverse the MADT to discover IOSAPIC and Local SAPIC
         * information.
         */
        ia64_probe_sapics();
        ia64_pib = pmap_mapdev(ia64_lapic_addr, sizeof(*ia64_pib));

        ia64_mca_init();

        /*
         * Create sysctl tree for per-CPU information.
         */
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                snprintf(nodename, sizeof(nodename), "%u", pc->pc_cpuid);
                sysctl_ctx_init(&pc->pc_md.sysctl_ctx);
                pc->pc_md.sysctl_tree = SYSCTL_ADD_NODE(&pc->pc_md.sysctl_ctx,
                    SYSCTL_STATIC_CHILDREN(_machdep_cpu), OID_AUTO, nodename,
                    CTLFLAG_RD, NULL, "");
                if (pc->pc_md.sysctl_tree == NULL)
                        continue;

                pcs = &pc->pc_md.stats;

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nasts", CTLFLAG_RD, &pcs->pcs_nasts,
                    "Number of IPI_AST interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nclks", CTLFLAG_RD, &pcs->pcs_nclks,
                    "Number of clock interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nextints", CTLFLAG_RD, &pcs->pcs_nextints,
                    "Number of ExtINT interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nhardclocks", CTLFLAG_RD, &pcs->pcs_nhardclocks,
                    "Number of IPI_HARDCLOCK interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nhighfps", CTLFLAG_RD, &pcs->pcs_nhighfps,
                    "Number of IPI_HIGH_FP interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nhwints", CTLFLAG_RD, &pcs->pcs_nhwints,
                    "Number of hardware (device) interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "npreempts", CTLFLAG_RD, &pcs->pcs_npreempts,
                    "Number of IPI_PREEMPT interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nrdvs", CTLFLAG_RD, &pcs->pcs_nrdvs,
                    "Number of IPI_RENDEZVOUS interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nstops", CTLFLAG_RD, &pcs->pcs_nstops,
                    "Number of IPI_STOP interrupts");

                SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx,
                    SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO,
                    "nstrays", CTLFLAG_RD, &pcs->pcs_nstrays,
                    "Number of stray interrupts");
        }
}
SYSINIT(cpu_startup, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

void
cpu_flush_dcache(void *ptr, size_t len)
{
        vm_offset_t lim, va;

        va = (uintptr_t)ptr & ~31;
        lim = (uintptr_t)ptr + len;
        while (va < lim) {
                ia64_fc(va);
                va += 32;
        }

        ia64_srlz_d();
}
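
/*
 * Usage sketch (hypothetical caller): after the kernel modifies code at
 * 'va', the writes must be flushed from the D-cache and the I-cache
 * resynchronized before the new instructions may execute:
 *
 *      cpu_flush_dcache((void *)va, len);
 *      ia64_sync_icache(va, len);
 */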

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

        if (pcpu_find(cpu_id) == NULL || rate == NULL)
                return (EINVAL);
        *rate = (u_long)cpu_freq * 1000000ul;
        return (0);
}

void
cpu_halt(void)
{

        efi_reset_system();
}

void
cpu_idle(int busy)
{
        register_t ie;

        if (!busy) {
                critical_enter();
                cpu_idleclock();
        }

        ie = intr_disable();
        KASSERT(ie != 0, ("%s called with interrupts disabled\n", __func__));

        if (sched_runnable())
                ia64_enable_intr();
        else if (cpu_idle_hook != NULL) {
                (*cpu_idle_hook)();
                /* The hook must enable interrupts! */
        } else {
                ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
                ia64_enable_intr();
        }

        if (!busy) {
                cpu_activeclock();
                critical_exit();
        }
}

int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

void
cpu_reset(void)
{

        efi_reset_system();
}

void
cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
{
        struct pcb *oldpcb, *newpcb;

        oldpcb = old->td_pcb;
#ifdef COMPAT_FREEBSD32
        ia32_savectx(oldpcb);
#endif
        if (PCPU_GET(fpcurthread) == old)
                old->td_frame->tf_special.psr |= IA64_PSR_DFH;
        if (!savectx(oldpcb)) {
                newpcb = new->td_pcb;
                oldpcb->pcb_current_pmap =
                    pmap_switch(newpcb->pcb_current_pmap);

                atomic_store_rel_ptr(&old->td_lock, mtx);

#if defined(SCHED_ULE) && defined(SMP)
                while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
                        cpu_spinwait();
#endif

                PCPU_SET(curthread, new);

#ifdef COMPAT_FREEBSD32
                ia32_restorectx(newpcb);
#endif

                if (PCPU_GET(fpcurthread) == new)
                        new->td_frame->tf_special.psr &= ~IA64_PSR_DFH;
                restorectx(newpcb);
                /* We should not get here. */
                panic("cpu_switch: restorectx() returned");
                /* NOTREACHED */
        }
}

void
cpu_throw(struct thread *old __unused, struct thread *new)
{
        struct pcb *newpcb;

        newpcb = new->td_pcb;
        (void)pmap_switch(newpcb->pcb_current_pmap);

#if defined(SCHED_ULE) && defined(SMP)
        while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock)
                cpu_spinwait();
#endif

        PCPU_SET(curthread, new);

#ifdef COMPAT_FREEBSD32
        ia32_restorectx(newpcb);
#endif

        restorectx(newpcb);
        /* We should not get here. */
        panic("cpu_throw: restorectx() returned");
        /* NOTREACHED */
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        /*
         * Set pc_acpi_id to "uninitialized".
         * See sys/dev/acpica/acpi_cpu.c
         */
        pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
        struct thread *td;
        int intr;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                intr = intr_disable();
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_intr = intr;
        } else
                td->td_md.md_spinlock_count++;
        critical_enter();
}

void
spinlock_exit(void)
{
        struct thread *td;
        int intr;

        td = curthread;
        critical_exit();
        intr = td->td_md.md_saved_intr;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0)
                intr_restore(intr);
}
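
/*
 * Usage sketch: spinlock sections nest.  Interrupts are disabled by the
 * outermost spinlock_enter() and restored by the matching outermost
 * spinlock_exit(); nested pairs only adjust md_spinlock_count:
 *
 *      spinlock_enter();       count 0 -> 1, interrupts disabled
 *      spinlock_enter();       count 1 -> 2, no interrupt change
 *      spinlock_exit();        count 2 -> 1
 *      spinlock_exit();        count 1 -> 0, interrupts restored
 */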

void
map_vhpt(uintptr_t vhpt)
{
        pt_entry_t pte;
        uint64_t psr;

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RW;
        pte |= vhpt & PTE_PPN_MASK;

        __asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
            "r"(pmap_vhpt_log2size << 2));

        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(vhpt);
        ia64_set_itir(pmap_vhpt_log2size << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        ia64_srlz_i();
}

void
map_pal_code(void)
{
        pt_entry_t pte;
        vm_offset_t va;
        vm_size_t sz;
        uint64_t psr;
        u_int shft;

        if (ia64_pal_size == 0)
                return;

        va = IA64_PHYS_TO_RR7(ia64_pal_base);

        sz = ia64_pal_size;
        shft = 0;
        while (sz > 1) {
                shft++;
                sz >>= 1;
        }

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_RWX;
        pte |= ia64_pal_base & PTE_PPN_MASK;

        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: "r"(va), "r"(shft<<2));

        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(va);
        ia64_set_itir(shft << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(4), "r"(pte));
        ia64_srlz_d();
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        ia64_srlz_i();
}

void
map_gateway_page(void)
{
        pt_entry_t pte;
        uint64_t psr;

        pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
            PTE_PL_KERN | PTE_AR_X_RX;
        pte |= ia64_tpa((uint64_t)ia64_gateway_page) & PTE_PPN_MASK;

        __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
            "r"(VM_MAXUSER_ADDRESS), "r"(PAGE_SHIFT << 2));

        __asm __volatile("mov   %0=psr" : "=r"(psr));
        __asm __volatile("rsm   psr.ic|psr.i");
        ia64_srlz_i();
        ia64_set_ifa(VM_MAXUSER_ADDRESS);
        ia64_set_itir(PAGE_SHIFT << 2);
        ia64_srlz_d();
        __asm __volatile("itr.d dtr[%0]=%1" :: "r"(5), "r"(pte));
        ia64_srlz_d();
        __asm __volatile("itr.i itr[%0]=%1" :: "r"(2), "r"(pte));
        __asm __volatile("mov   psr.l=%0" :: "r" (psr));
        ia64_srlz_i();

        /* Expose the mapping to userland in ar.k5 */
        ia64_set_k5(VM_MAXUSER_ADDRESS);
}

static u_int
freq_ratio(u_long base, u_long ratio)
{
        u_long f;

        f = (base * (ratio >> 32)) / (ratio & 0xfffffffful);
        return ((f + 500000) / 1000000);
}
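
/*
 * Worked example (hypothetical values): the ratio argument packs the
 * numerator in the upper 32 bits and the denominator in the lower 32.
 * With a SAL base clock of 200000000 Hz and a processor ratio of 15/2,
 * f = 200000000 * 15 / 2 = 1500000000 Hz, which rounds to 1500 (MHz).
 */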

static void
calculate_frequencies(void)
{
        struct ia64_sal_result sal;
        struct ia64_pal_result pal;
        register_t ie;

        ie = intr_disable();
        sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0);
        pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0);
        intr_restore(ie);

        if (sal.sal_status == 0 && pal.pal_status == 0) {
                if (bootverbose) {
                        printf("Platform clock frequency %ld Hz\n",
                               sal.sal_result[0]);
                        printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, "
                               "ITC ratio %ld/%ld\n",
                               pal.pal_result[0] >> 32,
                               pal.pal_result[0] & ((1L << 32) - 1),
                               pal.pal_result[1] >> 32,
                               pal.pal_result[1] & ((1L << 32) - 1),
                               pal.pal_result[2] >> 32,
                               pal.pal_result[2] & ((1L << 32) - 1));
                }
                cpu_freq = freq_ratio(sal.sal_result[0], pal.pal_result[0]);
                bus_freq = freq_ratio(sal.sal_result[0], pal.pal_result[1]);
                itc_freq = freq_ratio(sal.sal_result[0], pal.pal_result[2]);
        }
}

struct ia64_init_return
ia64_init(void)
{
        struct ia64_init_return ret;
        int phys_avail_cnt;
        vm_offset_t kernstart, kernend;
        vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
        char *p;
        struct efi_md *md;
        int metadata_missing;

        /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */

        /*
         * TODO: Disable interrupts, floating point etc.
         * Maybe flush cache and tlb
         */
        ia64_set_fpsr(IA64_FPSR_DEFAULT);

        /*
         * TODO: Get critical system information (if possible, from the
         * information provided by the boot program).
         */

        /*
         * Look for the I/O ports first - we need them for console
         * probing.
         */
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
                switch (md->md_type) {
                case EFI_MD_TYPE_IOPORT:
                        ia64_port_base = (uintptr_t)pmap_mapdev(md->md_phys,
                            md->md_pages * EFI_PAGE_SIZE);
                        break;
                case EFI_MD_TYPE_PALCODE:
                        ia64_pal_size = md->md_pages * EFI_PAGE_SIZE;
                        ia64_pal_base = md->md_phys;
                        break;
                }
        }

        metadata_missing = 0;
        if (bootinfo->bi_modulep)
                preload_metadata = (caddr_t)bootinfo->bi_modulep;
        else
                metadata_missing = 1;

        if (envmode == 0 && bootinfo->bi_envp)
                kern_envp = (caddr_t)bootinfo->bi_envp;
        else
                kern_envp = static_env;

        /*
         * Look at arguments passed to us and compute boothowto.
         */
        boothowto = bootinfo->bi_boothowto;

        if (boothowto & RB_VERBOSE)
                bootverbose = 1;

        /*
         * Find the beginning and end of the kernel.
         */
        kernstart = trunc_page(kernel_text);
#ifdef DDB
        ksym_start = bootinfo->bi_symtab;
        ksym_end = bootinfo->bi_esymtab;
        kernend = (vm_offset_t)round_page(ksym_end);
#else
        kernend = (vm_offset_t)round_page(_end);
#endif
        /* But if the bootstrap tells us otherwise, believe it! */
        if (bootinfo->bi_kernend)
                kernend = round_page(bootinfo->bi_kernend);

        /*
         * Region 6 is direct mapped UC and region 7 is direct mapped
         * WC. The details of this are controlled by the Alt {I,D}TLB
         * handlers. Here we just make sure that they have the largest
         * possible page size to minimise TLB usage.
         */
        ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (PAGE_SHIFT << 2));
        ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (PAGE_SHIFT << 2));
        ia64_srlz_d();

        /*
         * Wire things up so we can call the firmware.
         */
        map_pal_code();
        efi_boot_minimal(bootinfo->bi_systab);
        ia64_xiv_init();
        ia64_sal_init();
        calculate_frequencies();

        set_cputicker(ia64_get_itc, (u_long)itc_freq * 1000000, 0);

        /*
         * Setup the PCPU data for the bootstrap processor. It is needed
         * by printf(). Also, since printf() has critical sections, we
         * need to initialize at least pc_curthread.
         */
        pcpup = &pcpu0;
        ia64_set_k4((u_int64_t)pcpup);
        pcpu_init(pcpup, 0, sizeof(pcpu0));
        dpcpu_init((void *)kernend, 0);
        PCPU_SET(md.lid, ia64_get_lid());
        kernend += DPCPU_SIZE;
        PCPU_SET(curthread, &thread0);

        /*
         * Initialize the console before we print anything out.
         */
        cninit();

        /* OUTPUT NOW ALLOWED */

        if (metadata_missing)
                printf("WARNING: loader(8) metadata is missing!\n");

        /* Get FPSWA interface */
        fpswa_iface = (bootinfo->bi_fpswa == 0) ? NULL :
            (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo->bi_fpswa);

        /* Init basic tunables, including hz */
        init_param1();

        p = getenv("kernelname");
        if (p != NULL) {
                strlcpy(kernelname, p, sizeof(kernelname));
                freeenv(p);
        }

        kernstartpfn = atop(IA64_RR_MASK(kernstart));
        kernendpfn = atop(IA64_RR_MASK(kernend));

        /*
         * Size the memory regions and load phys_avail[] with the results.
         */

        /*
         * Find out how much memory is available, by looking at
         * the memory descriptors.
         */

#ifdef DEBUG_MD
        printf("Memory descriptor count: %d\n", mdcount);
#endif

        phys_avail_cnt = 0;
        for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) {
#ifdef DEBUG_MD
                printf("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md,
                    md->md_type, md->md_phys, md->md_pages);
#endif

                pfn0 = ia64_btop(round_page(md->md_phys));
                pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096));
                if (pfn1 <= pfn0)
                        continue;

                if (md->md_type != EFI_MD_TYPE_FREE)
                        continue;

                /*
                 * We have a memory descriptor that describes conventional
                 * memory that is for general use. We must determine if the
                 * loader has put the kernel in this region.
                 */
                physmem += (pfn1 - pfn0);
                if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) {
                        /*
                         * Must compute the location of the kernel
                         * within the segment.
                         */
#ifdef DEBUG_MD
                        printf("Descriptor %p contains kernel\n", md);
#endif
                        if (pfn0 < kernstartpfn) {
                                /*
                                 * There is a chunk before the kernel.
                                 */
#ifdef DEBUG_MD
                                printf("Loading chunk before kernel: "
                                       "0x%lx / 0x%lx\n", pfn0, kernstartpfn);
#endif
                                phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
                                phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn);
                                phys_avail_cnt += 2;
                        }
                        if (kernendpfn < pfn1) {
                                /*
                                 * There is a chunk after the kernel.
                                 */
#ifdef DEBUG_MD
                                printf("Loading chunk after kernel: "
                                       "0x%lx / 0x%lx\n", kernendpfn, pfn1);
#endif
                                phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn);
                                phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
                                phys_avail_cnt += 2;
                        }
                } else {
                        /*
                         * Just load this cluster as one chunk.
                         */
#ifdef DEBUG_MD
                        printf("Loading descriptor %p: 0x%lx / 0x%lx\n", md,
                               pfn0, pfn1);
#endif
                        phys_avail[phys_avail_cnt] = ia64_ptob(pfn0);
                        phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1);
                        phys_avail_cnt += 2;
                }
        }
        phys_avail[phys_avail_cnt] = 0;

        Maxmem = physmem;
        init_param2(physmem);

        /*
         * Initialize error message buffer (at end of core).
         */
        msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
        msgbufinit(msgbufp, msgbufsize);

        proc_linkup0(&proc0, &thread0);
        /*
         * Init mapping for kernel stack for proc 0
         */
        thread0.td_kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE);
        thread0.td_kstack_pages = KSTACK_PAGES;

        mutex_init();

        /*
         * Initialize the rest of proc 0's PCB.
         *
         * Set the kernel sp, reserving space for an (empty) trapframe,
         * and make proc0's trapframe pointer point to it for sanity.
         * Initialise proc0's backing store to start after u area.
         */
        cpu_thread_alloc(&thread0);
        thread0.td_frame->tf_flags = FRAME_SYSCALL;
        thread0.td_pcb->pcb_special.sp =
            (u_int64_t)thread0.td_frame - 16;
        thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack;

        /*
         * Initialize the virtual memory system.
         */
        pmap_bootstrap();

        /*
         * Initialize debuggers, and break into them if appropriate.
         */
        kdb_init();

#ifdef KDB
        if (boothowto & RB_KDB)
                kdb_enter(KDB_WHY_BOOTFLAGS,
                    "Boot flags requested debugger\n");
#endif

        ia64_set_tpr(0);
        ia64_srlz_d();

        ret.bspstore = thread0.td_pcb->pcb_special.bspstore;
        ret.sp = thread0.td_pcb->pcb_special.sp;
        return (ret);
}

uint64_t
ia64_get_hcdp(void)
{

        return (bootinfo->bi_hcdp);
}

void
bzero(void *buf, size_t len)
{
        caddr_t p = buf;

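        /* Zero byte-wise until the pointer is u_long aligned. */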
        while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) {
                *p++ = 0;
                len--;
        }
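        /* Unrolled main loop: clear eight words per iteration. */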
        while (len >= sizeof(u_long) * 8) {
                *(u_long*) p = 0;
                *((u_long*) p + 1) = 0;
                *((u_long*) p + 2) = 0;
                *((u_long*) p + 3) = 0;
                len -= sizeof(u_long) * 8;
                *((u_long*) p + 4) = 0;
                *((u_long*) p + 5) = 0;
                *((u_long*) p + 6) = 0;
                *((u_long*) p + 7) = 0;
                p += sizeof(u_long) * 8;
        }
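        /* Clear any remaining whole words. */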
        while (len >= sizeof(u_long)) {
                *(u_long*) p = 0;
                len -= sizeof(u_long);
                p += sizeof(u_long);
        }
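        /* Zero the trailing bytes. */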
        while (len) {
                *p++ = 0;
                len--;
        }
}

u_int
ia64_itc_freq(void)
{

        return (itc_freq);
}

void
DELAY(int n)
{
        u_int64_t start, end, now;

        sched_pin();

        start = ia64_get_itc();
        end = start + itc_freq * n;
        /* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */
        do {
                now = ia64_get_itc();
        } while (now < end || (now > start && end < start));

        sched_unpin();
}
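
/*
 * Note on units: itc_freq is in MHz (see freq_ratio()), i.e. ITC ticks
 * per microsecond, so DELAY(n) spins for roughly n microseconds.  For
 * example, DELAY(1000) busy-waits about one millisecond.
 */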

/*
 * Send an interrupt (signal) to a process.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
        struct proc *p;
        struct thread *td;
        struct trapframe *tf;
        struct sigacts *psp;
        struct sigframe sf, *sfp;
        u_int64_t sbs, sp;
        int oonstack;
        int sig;
        u_long code;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        sig = ksi->ksi_signo;
        code = ksi->ksi_code;
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);
        tf = td->td_frame;
        sp = tf->tf_special.sp;
        oonstack = sigonstack(sp);
        sbs = 0;

        /* save user context */
        bzero(&sf, sizeof(struct sigframe));
        sf.sf_uc.uc_sigmask = *mask;
        sf.sf_uc.uc_stack = td->td_sigstk;
        sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
            ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

        /*
         * Allocate and validate space for the signal handler
         * context. Note that if the stack is in P0 space, the
         * call to grow() is a nop, and the useracc() check
         * will fail if the process has not already allocated
         * the space with a `brk'.
         */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                sbs = (u_int64_t)td->td_sigstk.ss_sp;
                sbs = (sbs + 15) & ~15;
                sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size);
#if defined(COMPAT_43)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else
                sfp = (struct sigframe *)sp;
        sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15);

        /* Fill in the siginfo structure for POSIX handlers. */
        if (SIGISMEMBER(psp->ps_siginfo, sig)) {
                sf.sf_si = ksi->ksi_info;
                sf.sf_si.si_signo = sig;
                /*
                 * XXX this shouldn't be here after code in trap.c
                 * is fixed
                 */
                sf.sf_si.si_addr = (void*)tf->tf_special.ifa;
                code = (u_int64_t)&sfp->sf_si;
        }

        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(p);

        get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);

        /* Copy the frame out to userland. */
        if (copyout(&sf, sfp, sizeof(sf)) != 0) {
                /*
                 * Process has trashed its stack; give it an illegal
                 * instruction to halt it in its tracks.
                 */
                PROC_LOCK(p);
                sigexit(td, SIGILL);
                return;
        }

        if ((tf->tf_flags & FRAME_SYSCALL) == 0) {
                tf->tf_special.psr &= ~IA64_PSR_RI;
                tf->tf_special.iip = ia64_get_k5() +
                    ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page);
        } else
                tf->tf_special.iip = ia64_get_k5() +
                    ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page);

        /*
         * Setup the trapframe to return to the signal trampoline. We pass
         * information to the trampoline in the following registers:
         *
         *      gp      new backing store or NULL
         *      r8      signal number
         *      r9      signal code or siginfo pointer
         *      r10     signal handler (function descriptor)
         */
        tf->tf_special.sp = (u_int64_t)sfp - 16;
        tf->tf_special.gp = sbs;
        tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore;
        tf->tf_special.ndirty = 0;
        tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat;
        tf->tf_scratch.gr8 = sig;
        tf->tf_scratch.gr9 = code;
        tf->tf_scratch.gr10 = (u_int64_t)catcher;

        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sys_sigreturn(struct thread *td,
        struct sigreturn_args /* {
                ucontext_t *sigcntxp;
        } */ *uap)
{
        ucontext_t uc;
        struct trapframe *tf;
        struct pcb *pcb;

        tf = td->td_frame;
        pcb = td->td_pcb;

        /*
         * Fetch the entire context structure at once for speed.
         * We don't use a normal argument to simplify RSE handling.
         */
        if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc)))
                return (EFAULT);

        set_mcontext(td, &uc.uc_mcontext);

#if defined(COMPAT_43)
        if (sigonstack(tf->tf_special.sp))
                td->td_sigstk.ss_flags |= SS_ONSTACK;
        else
                td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
        kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

        return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

        return sys_sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

        pcb->pcb_special = tf->tf_special;
        pcb->pcb_special.__spare = ~0UL;        /* XXX see unwind.c */
        save_callee_saved(&pcb->pcb_preserved);
        save_callee_saved_fp(&pcb->pcb_preserved_fp);
}

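/*
 * Flush the dirty registers that still live on the kernel register
 * stack out to the thread's user backing store, so that the special
 * register set describes a fully spilled state (ndirty becomes 0).
 */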
int
ia64_flush_dirty(struct thread *td, struct _special *r)
{
        struct iovec iov;
        struct uio uio;
        uint64_t bspst, kstk, rnat;
        int error, locked;

        if (r->ndirty == 0)
                return (0);

        kstk = td->td_kstack + (r->bspstore & 0x1ffUL);
        if (td == curthread) {
                __asm __volatile("mov   ar.rsc=0;;");
                __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
                /* Make sure we have all the user registers written out. */
                if (bspst - kstk < r->ndirty) {
                        __asm __volatile("flushrs;;");
                        __asm __volatile("mov   %0=ar.bspstore" : "=r"(bspst));
                }
                __asm __volatile("mov   %0=ar.rnat;;" : "=r"(rnat));
                __asm __volatile("mov   ar.rsc=3");
                error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty);
                kstk += r->ndirty;
                r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL))
                    ? *(uint64_t*)(kstk | 0x1f8L) : rnat;
        } else {
                locked = PROC_LOCKED(td->td_proc);
                if (!locked)
                        PHOLD(td->td_proc);
                iov.iov_base = (void*)(uintptr_t)kstk;
                iov.iov_len = r->ndirty;
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_offset = r->bspstore;
                uio.uio_resid = r->ndirty;
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_rw = UIO_WRITE;
                uio.uio_td = td;
                error = proc_rwmem(td->td_proc, &uio);
                /*
                 * XXX proc_rwmem() doesn't currently return ENOSPC,
                 * so I think it can bogusly return 0. Neither do
                 * we allow short writes.
                 */
                if (uio.uio_resid != 0 && error == 0)
                        error = ENOSPC;
                if (!locked)
                        PRELE(td->td_proc);
        }

        r->bspstore += r->ndirty;
        r->ndirty = 0;
        return (error);
}

int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
        struct trapframe *tf;
        int error;

        tf = td->td_frame;
        bzero(mc, sizeof(*mc));
        mc->mc_special = tf->tf_special;
        error = ia64_flush_dirty(td, &mc->mc_special);
        if (tf->tf_flags & FRAME_SYSCALL) {
                mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT;
                mc->mc_scratch = tf->tf_scratch;
                if (flags & GET_MC_CLEAR_RET) {
                        mc->mc_scratch.gr8 = 0;
                        mc->mc_scratch.gr9 = 0;
                        mc->mc_scratch.gr10 = 0;
                        mc->mc_scratch.gr11 = 0;
                }
        } else {
                mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT;
                mc->mc_scratch = tf->tf_scratch;
                mc->mc_scratch_fp = tf->tf_scratch_fp;
                /*
                 * XXX If the thread never used the high FP registers, we
                 * probably shouldn't waste time saving them.
                 */
                ia64_highfp_save(td);
                mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID;
                mc->mc_high_fp = td->td_pcb->pcb_high_fp;
        }
        save_callee_saved(&mc->mc_preserved);
        save_callee_saved_fp(&mc->mc_preserved_fp);
        return (error);
}

int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
        struct _special s;
        struct trapframe *tf;
        uint64_t psrmask;

        tf = td->td_frame;

        KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
            ("Whoa there! We have more than 8KB of dirty registers!"));

        s = mc->mc_special;
        /*
         * Only copy the user mask and the restart instruction bit from
         * the new context.
         */
        psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
            IA64_PSR_MFH | IA64_PSR_RI;
        s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask);
        /* We don't have any dirty registers of the new context. */
        s.ndirty = 0;
        if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
                /*
                 * We can get an async context passed to us while we
                 * entered the kernel through a syscall: sigreturn(2)
                 * takes contexts that could previously be the result of
                 * a trap or interrupt.
                 * Hence, we cannot assert that the trapframe is not
                 * a syscall frame, but we can assert that it's at
                 * least an expected syscall.
                 */
                if (tf->tf_flags & FRAME_SYSCALL) {
                        KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn, ("foo"));
                        tf->tf_flags &= ~FRAME_SYSCALL;
                }
                tf->tf_scratch = mc->mc_scratch;
                tf->tf_scratch_fp = mc->mc_scratch_fp;
                if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID)
                        td->td_pcb->pcb_high_fp = mc->mc_high_fp;
        } else {
                KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo"));
                if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) {
                        s.cfm = tf->tf_special.cfm;
                        s.iip = tf->tf_special.iip;
                        tf->tf_scratch.gr15 = 0;        /* Clear syscall nr. */
                } else
                        tf->tf_scratch = mc->mc_scratch;
        }
        tf->tf_special = s;
        restore_callee_saved(&mc->mc_preserved);
        restore_callee_saved_fp(&mc->mc_preserved_fp);

        return (0);
}

/*
 * Clear registers on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
        struct trapframe *tf;
        uint64_t *ksttop, *kst;

        tf = td->td_frame;
        ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty +
            (tf->tf_special.bspstore & 0x1ffUL));

        /*
         * We can ignore up to 8KB of dirty registers by masking off the
         * lower 13 bits in exception_restore() or epc_syscall(). This
         * should be enough for a couple of years, but if there are more
         * than 8KB of dirty registers, we lose track of the bottom of
         * the kernel stack. The solution is to copy the active part of
         * the kernel stack down 1 page (or 2, but not more than that)
         * so that we always have less than 8KB of dirty registers.
         */
        KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0,
            ("Whoa there! We have more than 8KB of dirty registers!"));

        bzero(&tf->tf_special, sizeof(tf->tf_special));
        if ((tf->tf_flags & FRAME_SYSCALL) == 0) {      /* break syscalls. */
                bzero(&tf->tf_scratch, sizeof(tf->tf_scratch));
                bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp));
                tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL;
                tf->tf_special.bspstore = IA64_BACKINGSTORE;
                /*
                 * Copy the arguments onto the kernel register stack so that
                 * they get loaded by the loadrs instruction. Skip over the
                 * NaT collection points.
                 */
                kst = ksttop - 1;
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst-- = 0;
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst-- = imgp->ps_strings;
                if (((uintptr_t)kst & 0x1ff) == 0x1f8)
                        *kst-- = 0;
                *kst = stack;
                tf->tf_special.ndirty = (ksttop - kst) << 3;
        } else {                                /* epc syscalls (default). */
                tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL;
                tf->tf_special.bspstore = IA64_BACKINGSTORE + 24;
                /*
                 * Write values for out0, out1 and out2 to the user's backing
                 * store and arrange for them to be restored into the user's
                 * initial register frame.
                 * Assumes that (bspstore & 0x1f8) < 0x1e0.
                 */
                suword((caddr_t)tf->tf_special.bspstore - 24, stack);
                suword((caddr_t)tf->tf_special.bspstore - 16, imgp->ps_strings);
                suword((caddr_t)tf->tf_special.bspstore -  8, 0);
        }

        tf->tf_special.iip = imgp->entry_addr;
        tf->tf_special.sp = (stack & ~15) - 16;
        tf->tf_special.rsc = 0xf;
        tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
        tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
            IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
            IA64_PSR_CPL_USER;
}

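/*
 * Set the PC for a traced thread. On ia64 the iip register addresses a
 * 16-byte instruction bundle; the low bits of 'addr' select the slot
 * (0-2) within that bundle, recorded in the psr.ri field.
 */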
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
        uint64_t slot;

        switch (addr & 0xFUL) {
        case 0:
                slot = IA64_PSR_RI_0;
                break;
        case 1:
                /* XXX we need to deal with MLX bundles here */
                slot = IA64_PSR_RI_1;
                break;
        case 2:
                slot = IA64_PSR_RI_2;
                break;
        default:
                return (EINVAL);
        }

        td->td_frame->tf_special.iip = addr & ~0x0FULL;
        td->td_frame->tf_special.psr =
            (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot;
        return (0);
}

int
ptrace_single_step(struct thread *td)
{
        struct trapframe *tf;

        /*
         * There's no way to set single stepping when we're leaving the
         * kernel through the EPC syscall path. The way we solve this is
         * by enabling the lower-privilege trap so that we re-enter the
         * kernel as soon as the privilege level changes. See trap.c for
         * how we proceed from there.
         */
        tf = td->td_frame;
        if (tf->tf_flags & FRAME_SYSCALL)
                tf->tf_special.psr |= IA64_PSR_LP;
        else
                tf->tf_special.psr |= IA64_PSR_SS;
        return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
        struct trapframe *tf;

        /*
         * Clear any and all status bits we may use to implement single
         * stepping.
         */
        tf = td->td_frame;
        tf->tf_special.psr &= ~IA64_PSR_SS;
        tf->tf_special.psr &= ~IA64_PSR_LP;
        tf->tf_special.psr &= ~IA64_PSR_TB;
        return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *tf;

        tf = td->td_frame;
        regs->r_special = tf->tf_special;
        regs->r_scratch = tf->tf_scratch;
        save_callee_saved(&regs->r_preserved);
        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct trapframe *tf;
        int error;

        tf = td->td_frame;
        error = ia64_flush_dirty(td, &tf->tf_special);
        if (!error) {
                tf->tf_special = regs->r_special;
                tf->tf_special.bspstore += tf->tf_special.ndirty;
                tf->tf_special.ndirty = 0;
                tf->tf_scratch = regs->r_scratch;
                restore_callee_saved(&regs->r_preserved);
        }
        return (error);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

        return (ENOSYS);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

        return (ENOSYS);
}

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
        struct trapframe *frame = td->td_frame;
        struct pcb *pcb = td->td_pcb;

        /* Save the high FP registers. */
        ia64_highfp_save(td);

        fpregs->fpr_scratch = frame->tf_scratch_fp;
        save_callee_saved_fp(&fpregs->fpr_preserved);
        fpregs->fpr_high = pcb->pcb_high_fp;
        return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
        struct trapframe *frame = td->td_frame;
        struct pcb *pcb = td->td_pcb;

        /* Throw away the high FP registers (should be redundant). */
        ia64_highfp_drop(td);

        frame->tf_scratch_fp = fpregs->fpr_scratch;
        restore_callee_saved_fp(&fpregs->fpr_preserved);
        pcb->pcb_high_fp = fpregs->fpr_high;
        return (0);
}

void
ia64_sync_icache(vm_offset_t va, vm_offset_t sz)
{
        vm_offset_t lim;

        if (!ia64_sync_icache_needed)
                return;

        lim = va + sz;
        while (va < lim) {
                ia64_fc_i(va);
                va += 32;       /* XXX */
        }

        ia64_sync_i();
        ia64_srlz_i();
}