/* $NetBSD: kvm_alpha.c,v 1.7.2.1 1997/11/02 20:34:26 mellon Exp $ */

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
31 #include <sys/types.h>
32 #include <sys/elf64.h>
36 #include <machine/atomic.h>
37 #include <machine/bootinfo.h>
38 #include <machine/elf.h>
39 #include <machine/pte.h>
41 #include "../../sys/ia64/include/atomic.h"
42 #include "../../sys/ia64/include/bootinfo.h"
43 #include "../../sys/ia64/include/elf.h"
44 #include "../../sys/ia64/include/pte.h"
53 #include "kvm_private.h"
55 #define REGION_BASE(n) (((uint64_t)(n)) << 61)
56 #define REGION_ADDR(x) ((x) & ((1LL<<61)-1LL))
58 #define NKPTEPG(ps) ((ps) / sizeof(struct ia64_lpte))
59 #define NKPTEDIR(ps) ((ps) >> 3)
60 #define KPTE_PTE_INDEX(va,ps) (((va)/(ps)) % NKPTEPG(ps))
61 #define KPTE_DIR0_INDEX(va,ps) ((((va)/(ps)) / NKPTEPG(ps)) / NKPTEDIR(ps))
62 #define KPTE_DIR1_INDEX(va,ps) ((((va)/(ps)) / NKPTEPG(ps)) % NKPTEDIR(ps))
64 #define PBVM_BASE 0x9ffc000000000000UL
65 #define PBVM_PGSZ (64 * 1024)
67 typedef size_t (a2p_f)(kvm_t *, uint64_t, off_t *);
/*
 * Map the ELF headers into the process' address space. We do this in two
 * steps: first the ELF header itself and, using that information, the whole
 * set of headers.
 */
85 ia64_maphdrs(kvm_t *kd, size_t sz)
87 struct vmstate *vm = kd->vmst;
89 /* munmap() previous mmap(). */
90 if (vm->mmapbase != NULL) {
91 munmap(vm->mmapbase, vm->mmapsize);
96 vm->mmapbase = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
97 if (vm->mmapbase == MAP_FAILED) {
98 _kvm_err(kd, kd->program, "cannot mmap corefile");
/*
 * Physical core support.
 */
110 phys_addr2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
116 if (pa != REGION_ADDR(pa))
119 e = (Elf64_Ehdr *)(kd->vmst->mmapbase);
121 p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff);
122 while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
127 *ofs = (pa - p->p_paddr) + p->p_offset;
129 return (p->p_memsz - (pa - p->p_paddr));
130 return (pgsz - ((size_t)pa & (pgsz - 1)));
133 _kvm_err(kd, kd->program, "invalid physical address %#jx",
139 phys_kvatop(kvm_t *kd, uint64_t va, off_t *ofs)
141 struct ia64_lpte pte;
142 uint64_t pa, pgaddr, pt0addr, pt1addr;
143 size_t pgno, pgsz, pt0no, pt1no;
145 if (va >= REGION_BASE(6)) {
146 /* Regions 6 and 7: direct mapped. */
147 pa = REGION_ADDR(va);
148 return (phys_addr2off(kd, pa, ofs, 0));
149 } else if (va >= REGION_BASE(5)) {
150 /* Region 5: Kernel Virtual Memory. */
151 va = REGION_ADDR(va);
152 pgsz = kd->vmst->pagesize;
153 pt0no = KPTE_DIR0_INDEX(va, pgsz);
154 pt1no = KPTE_DIR1_INDEX(va, pgsz);
155 pgno = KPTE_PTE_INDEX(va, pgsz);
156 if (pt0no >= NKPTEDIR(pgsz))
158 pt0addr = kd->vmst->kptdir + (pt0no << 3);
159 if (kvm_read(kd, pt0addr, &pt1addr, 8) != 8)
163 pt1addr += pt1no << 3;
164 if (kvm_read(kd, pt1addr, &pgaddr, 8) != 8)
168 pgaddr += pgno * sizeof(pte);
169 if (kvm_read(kd, pgaddr, &pte, sizeof(pte)) != sizeof(pte))
171 if (!(pte.pte & PTE_PRESENT))
173 pa = (pte.pte & PTE_PPN_MASK) + (va & (pgsz - 1));
174 return (phys_addr2off(kd, pa, ofs, pgsz));
175 } else if (va >= PBVM_BASE) {
176 /* Region 4: Pre-Boot Virtual Memory (PBVM). */
180 if (pt0no >= (kd->vmst->pbvm_pgtblsz >> 3))
182 pt0addr = kd->vmst->pbvm_pgtbl[pt0no];
183 if (!(pt0addr & PTE_PRESENT))
185 pa = (pt0addr & PTE_PPN_MASK) + va % pgsz;
186 return (phys_addr2off(kd, pa, ofs, pgsz));
190 _kvm_err(kd, kd->program, "invalid kernel virtual address %#jx",
197 phys_read(kvm_t *kd, uint64_t pa, void *buf, size_t bufsz)
202 sz = phys_addr2off(kd, pa, &ofs, 0);
204 return ((ssize_t)sz);
206 if (lseek(kd->pmfd, ofs, 0) == -1)
208 return (read(kd->pmfd, buf, bufsz));
/*
 * Virtual core support (aka minidump).
 */
216 virt_addr2off(kvm_t *kd, uint64_t va, off_t *ofs, size_t pgsz)
222 if (va < REGION_BASE(4))
225 e = (Elf64_Ehdr *)(kd->vmst->mmapbase);
227 p = (Elf64_Phdr *)(void *)((uintptr_t)(void *)e + e->e_phoff);
228 while (n && (va < p->p_vaddr || va >= p->p_vaddr + p->p_memsz))
233 *ofs = (va - p->p_vaddr) + p->p_offset;
235 return (p->p_memsz - (va - p->p_vaddr));
236 return (pgsz - ((size_t)va & (pgsz - 1)));
239 _kvm_err(kd, kd->program, "invalid virtual address %#jx",
245 virt_kvatop(kvm_t *kd, uint64_t va, off_t *ofs)
248 return (virt_addr2off(kd, va, ofs, 0));
/*
 * KVM architecture support functions.
 */
256 _kvm_freevtop(kvm_t *kd)
258 struct vmstate *vm = kd->vmst;
260 if (vm->pbvm_pgtbl != NULL)
261 free(vm->pbvm_pgtbl);
262 if (vm->mmapbase != NULL)
263 munmap(vm->mmapbase, vm->mmapsize);
269 _kvm_initvtop(kvm_t *kd)
278 kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
279 if (kd->vmst == NULL) {
280 _kvm_err(kd, kd->program, "cannot allocate vm");
285 kd->vmst->pagesize = getpagesize();
287 kd->vmst->pagesize = 8192;
290 if (ia64_maphdrs(kd, sizeof(Elf64_Ehdr)) == -1)
293 ehdr = kd->vmst->mmapbase;
294 hdrsz = ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum;
295 if (ia64_maphdrs(kd, hdrsz) == -1)
298 kd->vmst->kvatop = (ehdr->e_flags & EF_IA_64_ABSOLUTE) ?
299 phys_kvatop : virt_kvatop;
302 * Load the PBVM page table. We need this to resolve PBVM addresses.
303 * The PBVM page table is obtained from the bootinfo structure, of
304 * which the address is given to us in e_entry. If e_entry is 0, then
305 * this is assumed to be a pre-PBVM kernel.
306 * Note that the address of the bootinfo structure is either physical
307 * or virtual, depending on whether the core is physical or virtual.
309 if (ehdr->e_entry != 0 && (ehdr->e_flags & EF_IA_64_ABSOLUTE) != 0) {
310 sz = phys_read(kd, ehdr->e_entry, &bi, sizeof(bi));
311 if (sz != sizeof(bi)) {
312 _kvm_err(kd, kd->program,
313 "cannot read bootinfo at physical address %#jx",
314 (uintmax_t)ehdr->e_entry);
317 if (bi.bi_magic != BOOTINFO_MAGIC) {
318 _kvm_err(kd, kd->program, "invalid bootinfo");
321 kd->vmst->pbvm_pgtbl = _kvm_malloc(kd, bi.bi_pbvm_pgtblsz);
322 if (kd->vmst->pbvm_pgtbl == NULL) {
323 _kvm_err(kd, kd->program, "cannot allocate page table");
326 kd->vmst->pbvm_pgtblsz = bi.bi_pbvm_pgtblsz;
327 sz = phys_read(kd, bi.bi_pbvm_pgtbl, kd->vmst->pbvm_pgtbl,
329 if (sz != bi.bi_pbvm_pgtblsz) {
330 _kvm_err(kd, kd->program,
331 "cannot read page table at physical address %#jx",
332 (uintmax_t)bi.bi_pbvm_pgtbl);
336 kd->vmst->pbvm_pgtbl = NULL;
337 kd->vmst->pbvm_pgtblsz = 0;
341 * At this point we've got enough information to use kvm_read() for
342 * direct mapped (ie region 6 and region 7) address, such as symbol
346 nl[0].n_name = "ia64_kptdir";
349 if (kvm_nlist(kd, nl) != 0) {
350 _kvm_err(kd, kd->program, "bad namelist");
354 if (kvm_read(kd, (nl[0].n_value), &va, sizeof(va)) != sizeof(va)) {
355 _kvm_err(kd, kd->program, "cannot read kptdir");
359 if (va == REGION_BASE(5)) {
360 _kvm_err(kd, kd->program, "kptdir is itself virtual");
364 kd->vmst->kptdir = va;
369 _kvm_kvatop(kvm_t *kd, u_long va, off_t *ofs)
373 sz = kd->vmst->kvatop(kd, va, ofs);
374 return ((sz > INT_MAX) ? INT_MAX : sz);