2 /* $NetBSD: kvm_alpha.c,v 1.7.2.1 1997/11/02 20:34:26 mellon Exp $ */
5 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
8 * Author: Chris G. Demetriou
10 * Permission to use, copy, modify and distribute this software and
11 * its documentation is hereby granted, provided that both the copyright
12 * notice and this permission notice appear in all copies of the
13 * software, derivative works or modified versions, and any portions
14 * thereof, and that both notices appear in supporting documentation.
16 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
17 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
18 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
20 * Carnegie Mellon requests users of this software to return to
22 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
23 * School of Computer Science
24 * Carnegie Mellon University
25 * Pittsburgh PA 15213-3890
27 * any improvements or extensions that they make and grant Carnegie the
28 * rights to redistribute these changes.
31 #include <sys/types.h>
32 #include <sys/elf64.h>
35 #include <machine/atomic.h>
36 #include <machine/pte.h>
43 #include "kvm_private.h"
/*
 * ia64 splits the 64-bit virtual address space into 8 regions selected by
 * the top 3 bits of the VA.  REGION_BASE(n) is the lowest address of region
 * n; REGION_ADDR(x) strips the region bits, yielding the offset within the
 * region.
 */
45 #define REGION_BASE(n) (((uint64_t)(n)) << 61)
46 #define REGION_ADDR(x) ((x) & ((1LL<<61)-1LL))
/*
 * Kernel page-table geometry for a page size of ps bytes:
 * NKPTEPG(ps)  - leaf PTEs (struct ia64_lpte) that fit in one page.
 * NKPTEDIR(ps) - 8-byte directory pointers that fit in one page.
 */
48 #define NKPTEPG(ps) ((ps) / sizeof(struct ia64_lpte))
49 #define NKPTEDIR(ps) ((ps) >> 3)
/*
 * Decompose a region-relative VA into its two directory indices and the
 * index into the leaf PTE page (used by _kvm_kvatop for region-5 lookups).
 */
50 #define KPTE_PTE_INDEX(va,ps) (((va)/(ps)) % NKPTEPG(ps))
51 #define KPTE_DIR0_INDEX(va,ps) ((((va)/(ps)) / NKPTEPG(ps)) / NKPTEDIR(ps))
52 #define KPTE_DIR1_INDEX(va,ps) ((((va)/(ps)) / NKPTEPG(ps)) % NKPTEDIR(ps))
/*
 * NOTE(review): this chunk is fragmentary — the function's return type,
 * closing braces and error-path return statements are on source lines not
 * present here. Each visible line also carries a stray leading line-number
 * token from the extraction; left untouched.
 */
62 * Map the ELF headers into the process' address space. We do this in two
63 * steps: first the ELF header itself and using that information the whole
/*
 * _kvm_maphdrs: mmap() the first sz bytes of the crash-dump file
 * (kd->pmfd) read-only into kd->vmst->mmapbase, after releasing any
 * mapping from a previous call.  Presumably returns -1 on failure after
 * _kvm_err() — TODO confirm against the full source.
 */
67 _kvm_maphdrs(kvm_t *kd, size_t sz)
69 struct vmstate *vm = kd->vmst;
71 /* munmap() previous mmap(). */
72 if (vm->mmapbase != NULL) {
73 munmap(vm->mmapbase, vm->mmapsize);
/* Fresh private, read-only mapping of the dump headers at file offset 0. */
78 vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
79 if (vm->mmapbase == MAP_FAILED) {
80 _kvm_err(kd, kd->program, "cannot mmap corefile");
88 * Translate a physical memory address to a file-offset in the crash-dump.
/*
 * _kvm_pa2off: walk the core file's ELF program headers to find the
 * segment containing physical address pa, store the corresponding file
 * offset through ofs, and return the number of contiguous bytes available
 * from pa.  NOTE(review): fragmentary — the declaration/initialization of
 * the loop counter `n`, the loop body advancing `p`, and the not-found
 * branch are on missing lines; TODO confirm against the full source.
 */
91 _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
93 Elf64_Ehdr *e = kd->vmst->mmapbase;
94 Elf64_Phdr *p = (Elf64_Phdr*)((char*)e + e->e_phoff);
/* Callers must pass a region-relative PA; region bits still set is a bug. */
97 if (pa != REGION_ADDR(pa)) {
98 _kvm_err(kd, kd->program, "internal error");
/* Scan program headers for the segment whose [p_paddr, p_paddr+p_memsz) covers pa. */
102 while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
107 *ofs = (pa - p->p_paddr) + p->p_offset;
/* Bytes remaining in the segment; with a page size, clamp to end of page. */
109 return (p->p_memsz - (pa - p->p_paddr));
110 return (pgsz - ((size_t)pa & (pgsz - 1)));
/*
 * _kvm_freevtop: tear down the VA-translation state — unmap the ELF header
 * mapping created by _kvm_maphdrs().  NOTE(review): the free() of kd->vmst
 * itself is presumably on a source line missing from this chunk — TODO
 * confirm against the full source.
 */
114 _kvm_freevtop(kvm_t *kd)
116 struct vmstate *vm = kd->vmst;
118 if (vm->mmapbase != NULL)
119 munmap(vm->mmapbase, vm->mmapsize);
/*
 * _kvm_initvtop: initialize VA-translation state for a crash dump.
 * Allocates the vmstate, records the page size, maps the dump's ELF
 * headers (first just the Ehdr, then enough to cover all Phdrs), resolves
 * the kernel symbol "ia64_kptdir" and caches the physical address of the
 * kernel page-table directory.  NOTE(review): fragmentary — declarations
 * of ehdr/hdrsz/nl/va, error-path returns, and closing braces are on
 * missing source lines.
 */
125 _kvm_initvtop(kvm_t *kd)
132 kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
133 if (kd->vmst == NULL) {
134 _kvm_err(kd, kd->program, "cannot allocate vm");
138 kd->vmst->pagesize = getpagesize();
/* Step 1: map only the ELF header to learn the program-header layout. */
140 if (_kvm_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
143 ehdr = kd->vmst->mmapbase;
144 hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
/* Step 2: remap large enough to cover every program header. */
145 if (_kvm_maphdrs(kd, hdrsz) == -1)
149 * At this point we've got enough information to use kvm_read() for
150 * direct mapped (ie region 6 and region 7) address, such as symbol
154 nl[0].n_name = "ia64_kptdir";
157 if (kvm_nlist(kd, nl) != 0) {
158 _kvm_err(kd, kd->program, "bad namelist");
/* Read the kernel's page-table-directory pointer through the symbol. */
162 if (kvm_read(kd, (nl[0].n_value), &va, sizeof(va)) != sizeof(va)) {
163 _kvm_err(kd, kd->program, "cannot read kptdir");
/* The kptdir must live in a direct-mapped region (6 or 7) to be usable. */
167 if (va < REGION_BASE(6)) {
168 _kvm_err(kd, kd->program, "kptdir is itself virtual");
172 kd->vmst->kptdir = va;
/*
 * _kvm_kvatop: translate a kernel virtual address to a crash-dump file
 * offset (stored through pa), returning the number of contiguous valid
 * bytes.  Regions 6/7 are direct-mapped; region 5 is walked through the
 * two-level kernel page table rooted at kd->vmst->kptdir.  NOTE(review):
 * fragmentary — error-path returns inside the region-5 walk and the final
 * fall-through return after the invalid-address error are on missing
 * source lines; TODO confirm against the full source.
 */
177 _kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
179 struct ia64_lpte pte;
180 uint64_t pgaddr, pt0addr, pt1addr;
181 size_t pgno, pgsz, pt0no, pt1no;
183 if (va >= REGION_BASE(6)) {
184 /* Regions 6 and 7: direct mapped. */
185 return (_kvm_pa2off(kd, REGION_ADDR(va), pa, 0));
186 } else if (va >= REGION_BASE(5)) {
187 /* Region 5: virtual. */
188 va = REGION_ADDR(va);
189 pgsz = kd->vmst->pagesize;
/* Split the VA into dir0/dir1 indices and the leaf-PTE index. */
190 pt0no = KPTE_DIR0_INDEX(va, pgsz);
191 pt1no = KPTE_DIR1_INDEX(va, pgsz);
192 pgno = KPTE_PTE_INDEX(va, pgsz);
193 if (pt0no >= NKPTEDIR(pgsz))
/* Level 0: fetch the level-1 directory page address (8-byte entries). */
195 pt0addr = kd->vmst->kptdir + (pt0no << 3);
196 if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
/* Level 1: fetch the address of the leaf PTE page. */
200 pt1addr += pt1no << 3;
201 if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
/* Leaf: read the PTE itself and validate it. */
205 pgaddr += pgno * sizeof(pte);
206 if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
208 if (!(pte.pte & PTE_PRESENT))
/* Combine the PTE's physical page number with the in-page offset. */
210 va = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
211 return (_kvm_pa2off(kd, va, pa, pgsz));
215 _kvm_err(kd, kd->program, "invalid kernel virtual address");