2 /* $NetBSD: kvm_alpha.c,v 1.7.2.1 1997/11/02 20:34:26 mellon Exp $ */
5 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
8 * Author: Chris G. Demetriou
10 * Permission to use, copy, modify and distribute this software and
11 * its documentation is hereby granted, provided that both the copyright
12 * notice and this permission notice appear in all copies of the
13 * software, derivative works or modified versions, and any portions
14 * thereof, and that both notices appear in supporting documentation.
16 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
17 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
18 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
20 * Carnegie Mellon requests users of this software to return to
22 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
23 * School of Computer Science
24 * Carnegie Mellon University
25 * Pittsburgh PA 15213-3890
27 * any improvements or extensions that they make and grant Carnegie the
28 * rights to redistribute these changes.
31 #include <sys/types.h>
32 #include <sys/elf64.h>
35 #include <machine/pte.h>
42 #include "kvm_private.h"
/*
 * ia64 virtual/physical address layout: bits 63-61 select one of eight
 * regions; the low 61 bits are the offset within the region.
 * REGION_BASE(n) yields the first address of region n; REGION_ADDR(x)
 * strips the region bits, leaving the region-relative offset.
 * The mask is built from UINT64_C(1) so the arithmetic stays unsigned,
 * matching the uint64_t addresses these macros are applied to.
 */
#define	REGION_BASE(n)		(((uint64_t)(n)) << 61)
#define	REGION_ADDR(x)		((x) & ((UINT64_C(1) << 61) - 1))

/*
 * Kernel page-table geometry for region 5, parameterized on the kernel
 * page size "ps":
 *   NKPTEPG(ps)  - leaf PTEs per page-table page
 *   NKPTEDIR(ps) - 8-byte directory entries per directory page
 * and the three per-level indices of a region-relative VA:
 * dir0 -> dir1 -> PTE.
 */
#define	NKPTEPG(ps)		((ps) / sizeof(struct ia64_lpte))
#define	NKPTEDIR(ps)		((ps) >> 3)
#define	KPTE_PTE_INDEX(va,ps)	(((va)/(ps)) % NKPTEPG(ps))
#define	KPTE_DIR0_INDEX(va,ps)	((((va)/(ps)) / NKPTEPG(ps)) / NKPTEDIR(ps))
#define	KPTE_DIR1_INDEX(va,ps)	((((va)/(ps)) / NKPTEPG(ps)) % NKPTEDIR(ps))
/*
 * Map the ELF headers into the process' address space.  We do this in two
 * steps: first the ELF header itself and using that information the whole
 * program header table is mapped.
 *
 * NOTE(review): the return-type line and parts of this function's body
 * (braces, success/error returns) are elided from this view.
 */
_kvm_maphdrs(kvm_t *kd, size_t sz)
	struct vmstate *vm = kd->vmst;

	/* munmap() previous mmap(). */
	if (vm->mmapbase != NULL) {
		munmap(vm->mmapbase, vm->mmapsize);

	/* Headers live at the start of the dump: map "sz" bytes from offset 0,
	 * read-only and private — we only parse, never modify, the core file. */
	vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
	if (vm->mmapbase == MAP_FAILED) {
		_kvm_err(kd, kd->program, "cannot mmap corefile");
/*
 * Translate a physical memory address to a file-offset in the crash-dump.
 * Walks the mapped ELF program headers looking for the segment whose
 * physical range covers "pa"; on a hit, *ofs receives the corresponding
 * file offset and the return value is the number of contiguous bytes
 * readable from there.
 *
 * NOTE(review): the declaration of "n" (program-header count), error
 * returns, and the conditional between the two final returns are elided
 * from this view.
 */
_kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
	Elf64_Ehdr *e = kd->vmst->mmapbase;
	Elf64_Phdr *p = (Elf64_Phdr*)((char*)e + e->e_phoff);

	/* A physical address must not carry region bits (bits 63-61). */
	if (pa != REGION_ADDR(pa)) {
		_kvm_err(kd, kd->program, "internal error");

	/* Scan for the program header whose [p_paddr, p_paddr+p_memsz) range
	 * contains pa. */
	while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))

	*ofs = (pa - p->p_paddr) + p->p_offset;
	/* Bytes remaining in the matched segment from pa onward. */
	return (p->p_memsz - (pa - p->p_paddr));
	/* Bytes remaining in the enclosing page of size pgsz (pgsz must be a
	 * power of two for this mask to be valid). */
	return (pgsz - ((size_t)pa & (pgsz - 1)));
/*
 * Tear down the virtual-to-physical translation state set up by
 * _kvm_initvtop(): unmap the ELF header mapping.
 *
 * NOTE(review): surrounding lines (braces, freeing of kd->vmst itself)
 * are elided from this view.
 */
_kvm_freevtop(kvm_t *kd)
	struct vmstate *vm = kd->vmst;

	if (vm->mmapbase != NULL)
		munmap(vm->mmapbase, vm->mmapsize);
/*
 * Initialize kernel virtual-to-physical translation: allocate the
 * vmstate, map the dump's ELF headers, and locate the kernel's
 * page-table directory via the "ia64_kptdir" symbol.
 *
 * NOTE(review): declarations of ehdr/hdrsz/va, nlist[1] setup, braces,
 * and the error/return paths are elided from this view.
 */
_kvm_initvtop(kvm_t *kd)
	struct nlist nlist[2];

	kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
	if (kd->vmst == NULL) {
		_kvm_err(kd, kd->program, "cannot allocate vm");

	/* Page size used for the region-5 page-table walk; assumes the dump
	 * was taken with the same page size as the running system — TODO
	 * confirm. */
	kd->vmst->pagesize = getpagesize();

	/* Step one: map just the ELF file header... */
	if (_kvm_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)

	/* ...step two: remap large enough to cover the whole program header
	 * table, whose extent the file header tells us. */
	ehdr = kd->vmst->mmapbase;
	hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
	if (_kvm_maphdrs(kd, hdrsz) == -1)

	/*
	 * At this point we've got enough information to use kvm_read() for
	 * direct mapped (ie region 6 and region 7) address, such as symbol
	 * lookups via the namelist.
	 */
	nlist[0].n_name = "ia64_kptdir";

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");

	/* Read the kernel's page-table directory pointer through the symbol. */
	if (kvm_read(kd, (nlist[0].n_value), &va, sizeof(va)) != sizeof(va)) {
		_kvm_err(kd, kd->program, "cannot read kptdir");

	/* The kptdir pointer must itself be a direct-mapped (region 6/7)
	 * address, otherwise we would need translation to translate. */
	if (va < REGION_BASE(6)) {
		_kvm_err(kd, kd->program, "kptdir is itself virtual");

	kd->vmst->kptdir = va;
/*
 * Translate a kernel virtual address to a crash-dump file offset
 * (stored through *pa).  Region 6/7 addresses are direct-mapped;
 * region 5 addresses are resolved by a three-level walk of the kernel
 * page table rooted at vmst->kptdir (dir0 -> dir1 -> PTE page).
 *
 * NOTE(review): the bodies of several "if" failure checks (error gotos)
 * and the function's brace/return lines are elided from this view.
 */
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
	struct ia64_lpte pte;
	uint64_t pgaddr, pt0addr, pt1addr;
	size_t pgno, pgsz, pt0no, pt1no;

	if (va >= REGION_BASE(6)) {
		/* Regions 6 and 7: direct mapped. */
		return (_kvm_pa2off(kd, REGION_ADDR(va), pa, 0));
	} else if (va >= REGION_BASE(5)) {
		/* Region 5: virtual. */
		va = REGION_ADDR(va);
		pgsz = kd->vmst->pagesize;
		pt0no = KPTE_DIR0_INDEX(va, pgsz);
		pt1no = KPTE_DIR1_INDEX(va, pgsz);
		pgno = KPTE_PTE_INDEX(va, pgsz);
		if (pt0no >= NKPTEDIR(pgsz))
		/* Level 0: each kptdir slot is an 8-byte pointer to a
		 * level-1 directory page. */
		pt0addr = kd->vmst->kptdir + (pt0no << 3);
		if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
		/* Level 1: 8-byte pointer to the PTE page. */
		pt1addr += pt1no << 3;
		if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
		/* Leaf: read the PTE for this page. */
		pgaddr += pgno * sizeof(pte);
		if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
		if (!(pte.pte & PTE_PRESENT))
		/* Physical page number from the PTE plus the in-page offset. */
		va = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
		return (_kvm_pa2off(kd, va, pa, pgsz));

	_kvm_err(kd, kd->program, "invalid kernel virtual address");