/*-
 * Copyright (c) 2005 Olivier Houchard
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * ARM machine dependent routines for kvm.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/endian.h>

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include <machine/vmparam.h>

#include "kvm_private.h"
63 * Translate a physical memory address to a file-offset in the crash-dump.
66 _kvm_pa2off(kvm_t *kd, uint64_t pa, off_t *ofs, size_t pgsz)
68 struct vmstate *vm = kd->vmst;
74 while (n && (pa < p->p_paddr || pa >= p->p_paddr + p->p_memsz))
79 *ofs = (pa - p->p_paddr) + p->p_offset;
81 return (p->p_memsz - (pa - p->p_paddr));
82 return (pgsz - ((size_t)pa & (pgsz - 1)));
86 _arm_freevtop(kvm_t *kd)
88 struct vmstate *vm = kd->vmst;
99 return (_kvm_probe_elf_kernel(kd, ELFCLASS32, EM_ARM) &&
100 !_kvm_is_minidump(kd));
104 _arm_initvtop(kvm_t *kd)
107 struct kvm_nlist nl[2];
109 arm_physaddr_t physaddr, pa;
110 arm_pd_entry_t *l1pt;
115 _kvm_err(kd, kd->program, "raw dumps not supported on arm");
119 vm = _kvm_malloc(kd, sizeof(*vm));
121 _kvm_err(kd, kd->program, "cannot allocate vm");
127 if (_kvm_read_core_phdrs(kd, &vm->phnum, &vm->phdr) == -1)
131 for (i = 0; i < vm->phnum; i++) {
132 if (vm->phdr[i].p_type == PT_DUMP_DELTA) {
133 kernbase = vm->phdr[i].p_vaddr;
134 physaddr = vm->phdr[i].p_paddr;
142 nl[0].n_name = "kernbase";
143 if (kvm_nlist2(kd, nl) != 0) {
147 _kvm_err(kd, kd->program, "cannot resolve kernbase");
151 kernbase = nl[0].n_value;
153 nl[0].n_name = "physaddr";
154 if (kvm_nlist2(kd, nl) != 0) {
155 _kvm_err(kd, kd->program, "couldn't get phys addr");
158 physaddr = nl[0].n_value;
160 nl[0].n_name = "kernel_l1pa";
161 if (kvm_nlist2(kd, nl) != 0) {
162 _kvm_err(kd, kd->program, "bad namelist");
165 if (kvm_read2(kd, (nl[0].n_value - kernbase + physaddr), &pa,
166 sizeof(pa)) != sizeof(pa)) {
167 _kvm_err(kd, kd->program, "cannot read kernel_l1pa");
170 l1pt = _kvm_malloc(kd, ARM_L1_TABLE_SIZE);
172 _kvm_err(kd, kd->program, "cannot allocate l1pt");
175 if (kvm_read2(kd, pa, l1pt, ARM_L1_TABLE_SIZE) != ARM_L1_TABLE_SIZE) {
176 _kvm_err(kd, kd->program, "cannot read l1pt");
/*
 * Short-descriptor page-table walking helpers, from arm/pmap.c.
 */
/* Index into the L1 table for a virtual address. */
#define	ARM_L1_IDX(va)		((va) >> ARM_L1_S_SHIFT)
/* Does this L1 descriptor map a 1MB section? */
#define	l1pte_section_p(pde)	(((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S)
/* A zero descriptor is an invalid/fault entry. */
#define	l1pte_valid(pde)	((pde) != 0)
#define	l2pte_valid(pte)	((pte) != 0)
/* Index of the L2 PTE for 'v' within its coarse table. */
#define	l2pte_index(v)		(((v) & ARM_L1_S_OFFSET) >> ARM_L2_S_SHIFT)
194 _arm_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
196 struct vmstate *vm = kd->vmst;
199 arm_physaddr_t pte_pa;
202 if (vm->l1pt == NULL)
203 return (_kvm_pa2off(kd, va, pa, ARM_PAGE_SIZE));
204 pd = _kvm32toh(kd, vm->l1pt[ARM_L1_IDX(va)]);
205 if (!l1pte_valid(pd))
207 if (l1pte_section_p(pd)) {
208 /* 1MB section mapping. */
209 *pa = (pd & ARM_L1_S_ADDR_MASK) + (va & ARM_L1_S_OFFSET);
210 return (_kvm_pa2off(kd, *pa, pa, ARM_L1_S_SIZE));
212 pte_pa = (pd & ARM_L1_C_ADDR_MASK) + l2pte_index(va) * sizeof(pte);
213 _kvm_pa2off(kd, pte_pa, &pte_off, ARM_L1_S_SIZE);
214 if (pread(kd->pmfd, &pte, sizeof(pte), pte_off) != sizeof(pte)) {
215 _kvm_syserr(kd, kd->program, "_arm_kvatop: pread");
218 pte = _kvm32toh(kd, pte);
219 if (!l2pte_valid(pte)) {
222 if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
223 *pa = (pte & ARM_L2_L_FRAME) | (va & ARM_L2_L_OFFSET);
224 return (_kvm_pa2off(kd, *pa, pa, ARM_L2_L_SIZE));
226 *pa = (pte & ARM_L2_S_FRAME) | (va & ARM_L2_S_OFFSET);
227 return (_kvm_pa2off(kd, *pa, pa, ARM_PAGE_SIZE));
229 _kvm_err(kd, 0, "Invalid address (%jx)", (uintmax_t)va);
/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. m68k)
 */
/*
 * NOTE(review): the dropped lines around this function are consistent with
 * it having been guarded by FBSD_NOT_YET (as in sibling kvm backends);
 * confirm against repository history.
 */
#ifdef FBSD_NOT_YET
int
_kvm_mdopen(kvm_t *kd)
{

	kd->usrstack = USRSTACK;
	kd->min_uva = VM_MIN_ADDRESS;
	kd->max_uva = VM_MAXUSER_ADDRESS;

	return (0);
}
#endif
253 _arm_native(kvm_t *kd)
255 _arm_native(kvm_t *kd __unused)
260 #if _BYTE_ORDER == _LITTLE_ENDIAN
261 return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2LSB);
263 return (kd->nlehdr.e_ident[EI_DATA] == ELFDATA2MSB);
270 static struct kvm_arch kvm_arm = {
271 .ka_probe = _arm_probe,
272 .ka_initvtop = _arm_initvtop,
273 .ka_freevtop = _arm_freevtop,
274 .ka_kvatop = _arm_kvatop,
275 .ka_native = _arm_native,