/*-
 * Copyright (c) 2013 Dmitry Chagin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include "opt_compat.h"
32 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
33 #define __ELF_WORD_SIZE 32
35 #define __ELF_WORD_SIZE 64
38 #include <sys/param.h>
39 #include <sys/systm.h>
41 #include <sys/kernel.h>
43 #include <sys/rwlock.h>
44 #include <sys/queue.h>
45 #include <sys/sysent.h>
48 #include <vm/vm_param.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_kern.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_pager.h>
57 #include <compat/linux/linux_vdso.h>
59 SLIST_HEAD(, linux_vdso_sym) __elfN(linux_vdso_syms) =
60 SLIST_HEAD_INITIALIZER(__elfN(linux_vdso_syms));
62 static int __elfN(symtabindex);
63 static int __elfN(symstrindex);
66 __elfN(linux_vdso_lookup)(Elf_Ehdr *, struct linux_vdso_sym *);
70 __elfN(linux_vdso_sym_init)(struct linux_vdso_sym *s)
73 SLIST_INSERT_HEAD(&__elfN(linux_vdso_syms), s, sym);
77 __elfN(linux_shared_page_init)(char **mapping)
83 obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
84 VM_PROT_DEFAULT, 0, NULL);
86 m = vm_page_grab(obj, 0, VM_ALLOC_ZERO);
87 VM_OBJECT_WUNLOCK(obj);
90 addr = kva_alloc(PAGE_SIZE);
91 pmap_qenter(addr, &m, 1);
92 *mapping = (char *)addr;
97 __elfN(linux_shared_page_fini)(vm_object_t obj)
100 vm_object_deallocate(obj);
104 __elfN(linux_vdso_fixup)(struct sysentvec *sv)
110 ehdr = (Elf_Ehdr *) sv->sv_sigcode;
112 if (!IS_ELF(*ehdr) ||
113 ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
114 ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
115 ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
116 ehdr->e_shoff == 0 ||
117 ehdr->e_shentsize != sizeof(Elf_Shdr))
118 panic("Linux invalid vdso header.\n");
120 if (ehdr->e_type != ET_DYN)
121 panic("Linux invalid vdso header.\n");
123 shdr = (Elf_Shdr *) ((caddr_t)ehdr + ehdr->e_shoff);
125 __elfN(symtabindex) = -1;
126 __elfN(symstrindex) = -1;
127 for (i = 0; i < ehdr->e_shnum; i++) {
128 if (shdr[i].sh_size == 0)
130 if (shdr[i].sh_type == SHT_DYNSYM) {
131 __elfN(symtabindex) = i;
132 __elfN(symstrindex) = shdr[i].sh_link;
136 if (__elfN(symtabindex) == -1 || __elfN(symstrindex) == -1)
137 panic("Linux invalid vdso header.\n");
139 ehdr->e_ident[EI_OSABI] = ELFOSABI_LINUX;
143 __elfN(linux_vdso_reloc)(struct sysentvec *sv)
145 struct linux_vdso_sym *lsym;
153 ehdr = (Elf_Ehdr *) sv->sv_sigcode;
155 /* Adjust our so relative to the sigcode_base */
156 if (sv->sv_shared_page_base != 0) {
157 ehdr->e_entry += sv->sv_shared_page_base;
158 phdr = (Elf_Phdr *)((caddr_t)ehdr + ehdr->e_phoff);
161 for (i = 0; i < ehdr->e_phnum; i++) {
162 phdr[i].p_vaddr += sv->sv_shared_page_base;
163 if (phdr[i].p_type != PT_DYNAMIC)
165 dyn = (Elf_Dyn *)((caddr_t)ehdr + phdr[i].p_offset);
166 for(; dyn->d_tag != DT_NULL; dyn++) {
167 switch (dyn->d_tag) {
181 case DT_ADDRRNGLO ... DT_ADDRRNGHI:
182 dyn->d_un.d_ptr += sv->sv_shared_page_base;
184 case DT_ENCODING ... DT_LOOS-1:
185 case DT_LOOS ... DT_HIOS:
186 if (dyn->d_tag >= DT_ENCODING &&
187 (dyn->d_tag & 1) == 0)
188 dyn->d_un.d_ptr += sv->sv_shared_page_base;
197 shdr = (Elf_Shdr *)((caddr_t)ehdr + ehdr->e_shoff);
198 for(i = 0; i < ehdr->e_shnum; i++) {
199 if (!(shdr[i].sh_flags & SHF_ALLOC))
201 shdr[i].sh_addr += sv->sv_shared_page_base;
202 if (shdr[i].sh_type != SHT_SYMTAB &&
203 shdr[i].sh_type != SHT_DYNSYM)
206 sym = (Elf_Sym *)((caddr_t)ehdr + shdr[i].sh_offset);
207 symcnt = shdr[i].sh_size / sizeof(*sym);
209 for(j = 0; j < symcnt; j++, sym++) {
210 if (sym->st_shndx == SHN_UNDEF ||
211 sym->st_shndx == SHN_ABS)
213 sym->st_value += sv->sv_shared_page_base;
218 SLIST_FOREACH(lsym, &__elfN(linux_vdso_syms), sym)
219 __elfN(linux_vdso_lookup)(ehdr, lsym);
223 __elfN(linux_vdso_lookup)(Elf_Ehdr *ehdr, struct linux_vdso_sym *vsym)
225 vm_offset_t strtab, symname;
230 shdr = (Elf_Shdr *) ((caddr_t)ehdr + ehdr->e_shoff);
232 strtab = (vm_offset_t)((caddr_t)ehdr +
233 shdr[__elfN(symstrindex)].sh_offset);
234 Elf_Sym *sym = (Elf_Sym *)((caddr_t)ehdr +
235 shdr[__elfN(symtabindex)].sh_offset);
236 symcnt = shdr[__elfN(symtabindex)].sh_size / sizeof(*sym);
238 for (i = 0; i < symcnt; ++i, ++sym) {
239 symname = strtab + sym->st_name;
240 if (strncmp(vsym->symname, (char *)symname, vsym->size) == 0) {
241 *vsym->ptr = (uintptr_t)sym->st_value;