2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2008, Juniper Networks, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the author nor the names of any co-contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/endian.h>
36 #include <sys/kerneldump.h>
45 #include "kvm_private.h"
/*
 * Sanity-check the ELF header of a powerpc64 crash dump: it must be a
 * 64-bit (ELFCLASS64) core file (ET_CORE) for EM_PPC64, EV_CURRENT,
 * with the standalone OS ABI; either byte order is accepted since both
 * BE and LE powerpc64 kernels exist.  Multi-byte fields are decoded
 * with _kvm16toh() because the dump's endianness may differ from the
 * host's.
 * NOTE(review): this listing is truncated — the return type and the
 * individual return statements between the checks are not visible here;
 * presumably each failed check returns false and the fall-through
 * returns true.  Confirm against the full source.
 */
56 valid_elf_header(kvm_t *kd, Elf64_Ehdr *eh)
61 if (eh->e_ident[EI_CLASS] != ELFCLASS64)
63 if (eh->e_ident[EI_DATA] != ELFDATA2MSB &&
64 eh->e_ident[EI_DATA] != ELFDATA2LSB)
66 if (eh->e_ident[EI_VERSION] != EV_CURRENT)
68 if (eh->e_ident[EI_OSABI] != ELFOSABI_STANDALONE)
70 if (_kvm16toh(kd, eh->e_type) != ET_CORE)
72 if (_kvm16toh(kd, eh->e_machine) != EM_PPC64)
74 /* Can't think of anything else to check... */
/*
 * Check whether the buffer starts with a valid kerneldumpheader for a
 * powerpc64 (BE or LE) dump, comparing the magic and architecture
 * strings.
 * NOTE(review): truncated listing — the return statements are not
 * visible; presumably the function returns 0 on mismatch and the
 * (padded) dump-header size on success, as its use as an offset in
 * powerpc_maphdrs() suggests.  Confirm against the full source.
 */
79 dump_header_size(struct kerneldumpheader *dh)
82 if (strcmp(dh->magic, KERNELDUMPMAGIC) != 0)
84 if (strcmp(dh->architecture, "powerpc64") != 0 &&
85 strcmp(dh->architecture, "powerpc64le") != 0)
87 /* That should do it... */
92 * Map the ELF headers into the process' address space. We do this in two
93 * steps: first the ELF header itself and using that information the whole
/*
 * Two-pass mapping: pass 1 maps just enough of the corefile (ELF header
 * plus a possible kerneldumpheader) to locate and size the program
 * headers; pass 2 remaps from offset 0 covering dump header + ELF
 * header + the full program-header table.
 * NOTE(review): truncated listing — the error-path returns between the
 * checks (and the braces closing them) are not visible here.
 */
97 powerpc_maphdrs(kvm_t *kd)
104 vm->mapsz = sizeof(*vm->eh) + sizeof(struct kerneldumpheader);
105 vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
106 if (vm->map == MAP_FAILED) {
107 _kvm_err(kd, kd->program, "cannot map corefile");
112 if (!valid_elf_header(kd, vm->eh)) {
114 * Hmmm, no ELF header. Maybe we still have a dump header.
115 * This is normal when the core file wasn't created by
116 * savecore(8), but instead was dumped over TFTP. We can
117 * easily skip the dump header...
/* Retry with the ELF header displaced past the dump header. */
119 vm->dmphdrsz = dump_header_size(vm->map);
120 if (vm->dmphdrsz == 0)
122 vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
123 if (!valid_elf_header(kd, vm->eh))
/*
 * Size of ELF header + program-header table, measured from the start
 * of the ELF image (e_phoff is relative to the ELF header).
 */
126 mapsz = _kvm16toh(kd, vm->eh->e_phentsize) *
127 _kvm16toh(kd, vm->eh->e_phnum) + _kvm64toh(kd, vm->eh->e_phoff);
128 munmap(vm->map, vm->mapsz);
130 /* Map all headers. */
131 vm->mapsz = vm->dmphdrsz + mapsz;
132 vm->map = mmap(NULL, vm->mapsz, PROT_READ, MAP_PRIVATE, kd->pmfd, 0);
133 if (vm->map == MAP_FAILED) {
134 _kvm_err(kd, kd->program, "cannot map corefile headers");
/* Re-derive the ELF/program-header pointers inside the new mapping. */
137 vm->eh = (void *)((uintptr_t)vm->map + vm->dmphdrsz);
138 vm->ph = (void *)((uintptr_t)vm->eh +
139 (uintptr_t)_kvm64toh(kd, vm->eh->e_phoff));
143 _kvm_err(kd, kd->program, "invalid corefile");
148 * Determine the offset within the corefile corresponding the virtual
149 * address. Return the number of contiguous bytes in the corefile or
150 * 0 when the virtual address is invalid.
153 powerpc64_va2off(kvm_t *kd, kvaddr_t va, off_t *ofs)
155 struct vmstate *vm = kd->vmst;
/*
 * Linear scan of the program headers for a segment whose
 * [p_vaddr, p_vaddr + p_memsz) range contains va.  Header fields are
 * byte-swapped per the dump's endianness via _kvm16toh/_kvm64toh.
 * NOTE(review): truncated listing — the "not found" return and the
 * nph decrement in the loop body are not visible here.
 */
160 nph = _kvm16toh(kd, vm->eh->e_phnum);
161 while (nph && (va < _kvm64toh(kd, ph->p_vaddr) ||
162 va >= _kvm64toh(kd, ph->p_vaddr) + _kvm64toh(kd, ph->p_memsz))) {
/* Advance by e_phentsize, not sizeof(*ph): the table stride is
 * dictated by the file, which may pad entries. */
164 ph = (void *)((uintptr_t)ph +
165 _kvm16toh(kd, vm->eh->e_phentsize));
170 /* Segment found. Return file offset and range. */
171 *ofs = vm->dmphdrsz + _kvm64toh(kd, ph->p_offset) +
172 (va - _kvm64toh(kd, ph->p_vaddr));
173 return (_kvm64toh(kd, ph->p_memsz) -
174 (va - _kvm64toh(kd, ph->p_vaddr)));
/*
 * Release the vtop state: unmap the header mapping (vm->eh points
 * dmphdrsz bytes into it, but munmap of a partial reference works on
 * page granularity) and, presumably, free vm itself in the lines not
 * visible in this truncated listing.
 */
178 _powerpc64_freevtop(kvm_t *kd)
180 struct vmstate *vm = kd->vmst;
182 if (vm->eh != MAP_FAILED)
183 munmap(vm->eh, vm->mapsz);
/*
 * Probe: accept a 64-bit big-endian EM_PPC64 kernel image.
 */
189 _powerpc64_probe(kvm_t *kd)
192 return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
193 kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
/*
 * Probe: accept a 64-bit little-endian EM_PPC64 kernel image.
 */
197 _powerpc64le_probe(kvm_t *kd)
200 return (_kvm_probe_elf_kernel(kd, ELFCLASS64, EM_PPC64) &&
201 kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
/*
 * Allocate the per-kd vmstate and map the corefile headers.
 * NOTE(review): truncated listing — the cleanup on powerpc_maphdrs()
 * failure (free + kd->vmst reset) and the return statements are not
 * visible here.
 */
205 _powerpc64_initvtop(kvm_t *kd)
208 kd->vmst = (struct vmstate *)_kvm_malloc(kd, sizeof(*kd->vmst));
209 if (kd->vmst == NULL)
212 if (powerpc_maphdrs(kd) == -1)
/*
 * Kernel-virtual-to-offset translation.  Only "minidump-style" cores
 * whose first program header has p_paddr == ~0 are handled via
 * powerpc64_va2off(); a raw physical corefile is rejected with an
 * error.
 */
219 _powerpc64_kvatop(kvm_t *kd, kvaddr_t va, off_t *ofs)
224 if (_kvm64toh(kd, vm->ph->p_paddr) == 0xffffffffffffffff)
225 return ((int)powerpc64_va2off(kd, va, ofs));
227 _kvm_err(kd, kd->program, "Raw corefile not supported");
/*
 * True only when the host itself is big-endian powerpc64 (decided at
 * compile time); the kd argument is unused.
 */
232 _powerpc64_native(kvm_t *kd __unused)
235 #if defined(__powerpc64__) && BYTE_ORDER == BIG_ENDIAN
/*
 * True only when the host itself is little-endian powerpc64 (decided
 * at compile time); the kd argument is unused.
 */
243 _powerpc64le_native(kvm_t *kd __unused)
246 #if defined(__powerpc64__) && BYTE_ORDER == LITTLE_ENDIAN
/*
 * Arch descriptor for big-endian powerpc64 cores.  Everything except
 * the probe and native checks is shared with the LE variant below.
 */
253 static struct kvm_arch kvm_powerpc64 = {
254 .ka_probe = _powerpc64_probe,
255 .ka_initvtop = _powerpc64_initvtop,
256 .ka_freevtop = _powerpc64_freevtop,
257 .ka_kvatop = _powerpc64_kvatop,
258 .ka_native = _powerpc64_native,
/*
 * Arch descriptor for little-endian powerpc64 cores; differs from the
 * BE variant only in the probe and native hooks.
 */
261 static struct kvm_arch kvm_powerpc64le = {
262 .ka_probe = _powerpc64le_probe,
263 .ka_initvtop = _powerpc64_initvtop,
264 .ka_freevtop = _powerpc64_freevtop,
265 .ka_kvatop = _powerpc64_kvatop,
266 .ka_native = _powerpc64le_native,
/* Register both endianness variants with the libkvm arch framework. */
269 KVM_ARCH(kvm_powerpc64);
270 KVM_ARCH(kvm_powerpc64le);