2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright 1996-1998 John D. Polstra.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/systm.h>
35 #include <sys/imgact.h>
36 #include <sys/linker.h>
37 #include <sys/sysent.h>
38 #include <sys/imgact_elf.h>
40 #include <sys/syscall.h>
41 #include <sys/signalvar.h>
42 #include <sys/vnode.h>
46 #include <vm/vm_param.h>
48 #include <machine/elf.h>
49 #include <machine/md_var.h>
51 #include <machine/vfp.h>
/* Forward declaration: Brandinfo header_supported hook, defined below. */
54 static boolean_t elf32_arm_abi_supported(struct image_params *);
/*
 * System entry vector for native FreeBSD ELF32 binaries on arm:
 * syscall table size, signal delivery, exec-time register/stack setup,
 * user address-space layout, and the shared page / hwcap exports.
 * Registered at boot via INIT_SYSENTVEC below.
 *
 * NOTE(review): this extraction is missing lines — there is no closing
 * "};" before INIT_SYSENTVEC, the ".sv_flags =" designator line is not
 * visible, and SV_ASLR appears on both flag lines (likely alternate
 * arms of an invisible #if).  Compare against the pristine source
 * before editing.
 */
59 struct sysentvec elf32_freebsd_sysvec = {
60 .sv_size = SYS_MAXSYSCALL,
65 .sv_fixup = __elfN(freebsd_fixup),
66 .sv_sendsig = sendsig,
67 .sv_sigcode = sigcode,
68 .sv_szsigcode = &szsigcode,
69 .sv_name = "FreeBSD ELF32",
70 .sv_coredump = __elfN(coredump),
71 .sv_imgact_try = NULL,
72 .sv_minsigstksz = MINSIGSTKSZ,
/* User VA range and initial stack location for 32-bit processes. */
73 .sv_minuser = VM_MIN_ADDRESS,
74 .sv_maxuser = VM_MAXUSER_ADDRESS,
75 .sv_usrstack = USRSTACK,
76 .sv_psstrings = PS_STRINGS,
77 .sv_stackprot = VM_PROT_ALL,
78 .sv_copyout_strings = exec_copyout_strings,
79 .sv_setregs = exec_setregs,
/*
 * ABI flag lines; the ".sv_flags =" designator and any surrounding
 * preprocessor conditionals are not visible here — TODO confirm
 * against the original source.
 */
84 SV_ASLR | SV_SHP | SV_TIMEKEEP |
86 SV_ABI_FREEBSD | SV_ILP32 | SV_ASLR,
87 .sv_set_syscall_retval = cpu_set_syscall_retval,
88 .sv_fetch_syscall_args = cpu_fetch_syscall_args,
89 .sv_syscallnames = syscallnames,
/* Kernel-provided shared page (sigcode, timekeeping, etc.). */
90 .sv_shared_page_base = SHAREDPAGE,
91 .sv_shared_page_len = PAGE_SIZE,
93 .sv_thread_detach = NULL,
/* Exported AT_HWCAP/AT_HWCAP2 auxv values (CPU feature bits). */
95 .sv_hwcap = &elf_hwcap,
96 .sv_hwcap2 = &elf_hwcap2,
/* Register the sysvec with the image activator at boot. */
98 INIT_SYSENTVEC(elf32_sysvec, &elf32_freebsd_sysvec);
/*
 * ELF brand describing native FreeBSD 32-bit arm executables: matched
 * against an image's OSABI / brand note during exec, and naming the
 * runtime linker used for dynamic binaries.  elf32_arm_abi_supported
 * vets the EABI version before the brand is accepted.
 *
 * NOTE(review): the closing "};" of the initializer is not visible in
 * this extraction (lines appear to be missing before SYSINIT).
 */
100 static Elf32_Brandinfo freebsd_brand_info = {
101 .brand = ELFOSABI_FREEBSD,
103 .compat_3_brand = "FreeBSD",
105 .interp_path = "/libexec/ld-elf.so.1",
106 .sysvec = &elf32_freebsd_sysvec,
107 .interp_newpath = NULL,
108 .brand_note = &elf32_freebsd_brandnote,
109 .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE,
110 .header_supported= elf32_arm_abi_supported,
/* Insert the brand into the global ELF brand table early in boot. */
113 SYSINIT(elf32, SI_SUB_EXEC, SI_ORDER_FIRST,
114 (sysinit_cfunc_t) elf32_insert_brand_entry,
115 &freebsd_brand_info);
/*
 * Brandinfo header_supported callback: inspect the EABI version encoded
 * in the ELF header's e_flags and reject images below the minimum
 * supported revision, printing a diagnostic to the user.
 *
 * NOTE(review): the return-type line, braces, and return statements are
 * not visible in this extraction — the body is truncated.
 */
118 elf32_arm_abi_supported(struct image_params *imgp)
120 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
123 * When configured for EABI, FreeBSD supports EABI versions 4 and 5.
125 if (EF_ARM_EABI_VERSION(hdr->e_flags) < EF_ARM_EABI_FREEBSD_MIN) {
127 uprintf("Attempting to execute non EABI binary (rev %d) image %s",
128 EF_ARM_EABI_VERSION(hdr->e_flags), imgp->args->fname);
/*
 * Emit per-thread machine-dependent core-dump notes.  The two
 * elf32_populate_note() calls visible here correspond to the size pass
 * (dst == NULL) and the fill pass for the NT_ARM_VFP register note.
 *
 * NOTE(review): surrounding control flow (the dst != NULL branch,
 * braces, and any #ifdef VFP guard) is not visible in this extraction.
 */
135 elf32_dump_thread(struct thread *td, void *dst, size_t *off)
141 get_vfpcontext(td, &vfp);
142 *off = elf32_populate_note(NT_ARM_VFP, &vfp, dst, sizeof(vfp),
145 *off = elf32_populate_note(NT_ARM_VFP, NULL, NULL, sizeof(vfp),
/*
 * Report whether a relocation targets an ifunc.  The parameter is
 * __unused, so presumably arm has no ifunc relocations and this always
 * returns false — body not visible here; TODO confirm.
 */
151 elf_is_ifunc_reloc(Elf_Size r_info __unused)
158 * It is possible for the compiler to emit relocations for unaligned data.
159 * We handle this situation with these inlines.
/* True when x is aligned for a pointer-sized access. */
161 #define RELOC_ALIGNED_P(x) \
162 (((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
/*
 * Read a word from a possibly-unaligned location: direct load when
 * aligned, memcpy otherwise.  NOTE(review): the direct-load return and
 * the final "return res;" are not visible in this extraction.
 */
164 static __inline Elf_Addr
165 load_ptr(Elf_Addr *where)
169 if (RELOC_ALIGNED_P(where))
171 memcpy(&res, where, sizeof(res));
/*
 * Counterpart to load_ptr(): store a word at a possibly-unaligned
 * location, using memcpy when unaligned.  NOTE(review): the aligned
 * store and braces are not visible in this extraction.
 */
176 store_ptr(Elf_Addr *where, Elf_Addr val)
178 if (RELOC_ALIGNED_P(where))
181 memcpy(where, &val, sizeof(val));
183 #undef RELOC_ALIGNED_P
186 /* Process one elf relocation with addend. */
/*
 * Shared worker for elf_reloc()/elf_reloc_local(): decode one REL or
 * RELA entry (selected by 'type'), then patch *where according to the
 * R_ARM_* relocation kind.  'lookup' resolves a symbol index to an
 * address; 'local' distinguishes the local-symbol pass.
 *
 * NOTE(review): this extraction is missing the declarations block, the
 * switch skeletons around the REL/RELA decode and the rtype dispatch,
 * and all return statements — only representative case bodies remain.
 */
188 elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
189 int type, int local, elf_lookup_fn lookup)
194 Elf_Word rtype, symidx;
196 const Elf_Rela *rela;
/* REL form: the addend lives in the word being relocated. */
201 rel = (const Elf_Rel *)data;
202 where = (Elf_Addr *) (relocbase + rel->r_offset);
203 addend = load_ptr(where);
204 rtype = ELF_R_TYPE(rel->r_info);
205 symidx = ELF_R_SYM(rel->r_info);
/* RELA form: explicit addend in the relocation record. */
208 rela = (const Elf_Rela *)data;
209 where = (Elf_Addr *) (relocbase + rela->r_offset);
210 addend = rela->r_addend;
211 rtype = ELF_R_TYPE(rela->r_info);
212 symidx = ELF_R_SYM(rela->r_info);
215 panic("unknown reloc type %d\n", type);
/* Base-relative relocation: no symbol lookup needed. */
219 if (rtype == R_ARM_RELATIVE) { /* A + B */
220 addr = elf_relocaddr(lf, relocbase + addend);
/* Skip the write when the word already holds the right value. */
221 if (load_ptr(where) != addr)
222 store_ptr(where, addr);
229 case R_ARM_NONE: /* none */
/* Symbolic relocation: resolve symidx and add to the in-place addend. */
233 error = lookup(lf, symidx, 1, &addr);
236 store_ptr(where, addr + load_ptr(where));
239 case R_ARM_COPY: /* none */
241 * There shouldn't be copy relocations in kernel
244 printf("kldload: unexpected R_COPY relocation\n");
/* PLT slot: store the resolved symbol address directly. */
248 case R_ARM_JUMP_SLOT:
249 error = lookup(lf, symidx, 1, &addr);
251 store_ptr(where, addr);
259 printf("kldload: unexpected relocation type %d\n",
/*
 * Public relocation entry point for non-local symbols: thin wrapper
 * around elf_reloc_internal() with local == 0.  (Braces not visible in
 * this extraction.)
 */
267 elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
268 elf_lookup_fn lookup)
271 return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup));
/*
 * Local-symbol relocation pass: thin wrapper around
 * elf_reloc_internal() with local == 1.  (Braces not visible in this
 * extraction.)
 */
275 elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
276 int type, elf_lookup_fn lookup)
279 return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup));
/*
 * Post-load hook for kernel modules: write back dirty data-cache lines
 * covering the module image (relocation fixups dirtied them) and sync
 * the instruction cache so the new code is visible to instruction
 * fetch.  NOTE(review): the #if/#else selecting between the
 * dcache_wb_pou() path and the cpu_*_range() path, plus the lf->id
 * check and return, are not visible in this extraction.
 */
283 elf_cpu_load_file(linker_file_t lf)
287 * The pmap code does not do an icache sync upon establishing executable
288 * mappings in the kernel pmap. It's an optimization based on the fact
289 * that kernel memory allocations always have EXECUTABLE protection even
290 * when the memory isn't going to hold executable code. The only time
291 * kernel memory holding instructions does need a sync is after loading
292 * a kernel module, and that's when this function gets called.
294 * This syncs data and instruction caches after loading a module. We
295 * don't worry about the kernel itself (lf->id is 1) as locore.S did
296 * that on entry. Even if data cache maintenance was done by IO code,
297 * the relocation fixup process creates dirty cache entries that we must
298 * write back before doing icache sync. The instruction cache sync also
299 * invalidates the branch predictor cache on platforms that have one.
304 dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size);
307 cpu_dcache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
308 cpu_l2cache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
309 cpu_icache_sync_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
315 elf_cpu_unload_file(linker_file_t lf __unused)