2 * Copyright (c) 1994 Sean Eric Fagan
3 * Copyright (c) 1994 Søren Schmidt
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer
11 * in this position and unchanged.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/fcntl.h>
37 #include <sys/imgact.h>
38 #include <sys/kernel.h>
40 #include <sys/malloc.h>
42 #include <sys/mount.h>
43 #include <sys/namei.h>
44 #include <sys/vnode.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_extern.h>
52 #include <i386/ibcs2/coff.h>
53 #include <i386/ibcs2/ibcs2_util.h>
55 MODULE_DEPEND(coff, ibcs2, 1, 1, 1);
57 extern struct sysentvec ibcs2_svr3_sysvec;
/* Forward declarations for the static helpers defined in this file. */
59 static int coff_load_file(struct thread *td, char *name);
60 static int exec_coff_imgact(struct image_params *imgp);
62 static int load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
65 load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
66 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
/*
 * Map one COFF section from vnode 'vp' into the target process.
 * 'filsz' bytes are backed by the file starting at 'offset'; any
 * remaining (memsz - filsz) tail is zero-filled anonymous memory
 * (BSS-style).  Returns 0 on success or an errno value.
 * NOTE(review): intervening lines are elided in this view; comments
 * below describe only the statements visible here.
 */
69 vm_offset_t map_offset;
72 unsigned char *data_buf = 0;
/* Page-align both the file offset and the destination address. */
75 map_offset = trunc_page(offset);
76 map_addr = trunc_page((vm_offset_t)vmaddr);
80 * We have the stupid situation that
81 * the section is longer than it is on file,
82 * which means it has zero-filled areas, and
83 * we have to work for it. Stupid iBCS!
/* Partial-page case: map only the whole pages present in the file. */
85 map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
88 * The only stuff we care about is on disk, and we
89 * don't care if we map in more than is really there.
/* Exact-fit case: round up and let the mapping cover the full span. */
91 map_len = round_page(offset + filsz) - trunc_page(map_offset);
94 DPRINTF(("%s(%d): vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
95 "VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
96 __FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
/* Map the file-backed portion copy-on-write at a fixed address. */
99 if ((error = vm_mmap(&vmspace->vm_map,
104 MAP_PRIVATE | MAP_FIXED,
/* No zero-filled tail: the file mapping above is the whole section. */
110 if (memsz == filsz) {
116 * Now we have screwball stuff, to accommodate stupid COFF.
117 * We have to map the remaining bit of the file into the kernel's
118 * memory map, allocate some anonymous memory, copy that last
119 * bit into it, and then we're done. *sigh*
120 * For clean-up reasons, we actually map in the file last.
/* Bytes of file data that spill into the first page of the BSS area. */
123 copy_len = (offset + filsz) - trunc_page(offset + filsz);
124 map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
125 map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
127 DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx,0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, (uintmax_t)map_addr, map_len));
/* Allocate the zero-filled (anonymous) tail at the fixed address. */
130 error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
131 map_len, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
133 return (vm_mmap_to_errno(error));
/*
 * Temporarily map the last partial page of the file into the kernel
 * (exec_map) so its bytes can be copied out into the anonymous tail.
 */
136 if ((error = vm_mmap(exec_map,
137 (vm_offset_t *) &data_buf,
144 trunc_page(offset + filsz))) != 0)
/* Copy the straddling bytes into the user mapping, then free the
 * temporary kernel-side page. */
147 error = copyout(data_buf, (caddr_t) map_addr, copy_len);
149 kmem_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);
155 coff_load_file(struct thread *td, char *name)
/*
 * Load a COFF shared library 'name' into the current process image.
 * Looks the file up, validates it (executable, regular, non-empty,
 * not open for write), maps in its headers, and loads the text and
 * data/bss sections via load_coff_section().  Returns 0 or an errno.
 * NOTE(review): intervening lines are elided in this view; comments
 * below describe only the statements visible here.
 */
157 struct proc *p = td->td_proc;
158 struct vmspace *vmspace = p->p_vmspace;
163 struct filehdr *fhdr;
164 struct aouthdr *ahdr;
/* Section placement gathered from the section header table below. */
168 unsigned long text_offset = 0, text_address = 0, text_size = 0;
169 unsigned long data_offset = 0, data_address = 0, data_size = 0;
170 unsigned long bss_size = 0;
/* Look the library up by kernel-space pathname. */
173 NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME,
174 UIO_SYSSPACE, name, td);
/* Refuse to execute a file that is currently open for writing. */
184 error = VOP_GET_WRITECOUNT(vp, &writecount);
187 if (writecount != 0) {
192 if ((error = VOP_GETATTR(vp, &attr, td->td_ucred)) != 0)
/* Must be a regular, executable file on an exec-permitting mount. */
195 if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
196 || ((attr.va_mode & 0111) == 0)
197 || (attr.va_type != VREG))
200 if (attr.va_size == 0) {
205 if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
208 if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0)
212 * Lose the lock on the vnode. It's no longer needed, and must not
213 * exist for the pagefault paging to work below.
/* Map the first page of the file into exec_map to read the headers. */
217 if ((error = vm_mmap(exec_map,
218 (vm_offset_t *) &ptr,
228 fhdr = (struct filehdr *)ptr;
/* Sanity-check the COFF magic before trusting any header fields. */
230 if (fhdr->f_magic != I386_COFF) {
232 goto dealloc_and_fail;
235 nscns = fhdr->f_nscns;
/* Section table must fit inside the single page mapped above. */
237 if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
239 * XXX -- just fail. I'm so lazy.
242 goto dealloc_and_fail;
245 ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));
247 scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
248 + sizeof(struct aouthdr));
/* Walk the section table, recording text, data and bss geometry. */
250 for (i = 0; i < nscns; i++) {
251 if (scns[i].s_flags & STYP_NOLOAD)
253 else if (scns[i].s_flags & STYP_TEXT) {
254 text_address = scns[i].s_vaddr;
255 text_size = scns[i].s_size;
256 text_offset = scns[i].s_scnptr;
258 else if (scns[i].s_flags & STYP_DATA) {
259 data_address = scns[i].s_vaddr;
260 data_size = scns[i].s_size;
261 data_offset = scns[i].s_scnptr;
262 } else if (scns[i].s_flags & STYP_BSS) {
263 bss_size = scns[i].s_size;
/* Text is read/execute; no zero-filled tail (memsz == filsz). */
267 if ((error = load_coff_section(vmspace, vp, text_offset,
268 (caddr_t)(void *)(uintptr_t)text_address,
269 text_size, text_size,
270 VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
271 goto dealloc_and_fail;
/* Data and bss are mapped together: bss is the zero-filled tail. */
273 if ((error = load_coff_section(vmspace, vp, data_offset,
274 (caddr_t)(void *)(uintptr_t)data_address,
275 data_size + bss_size, data_size,
276 VM_PROT_ALL)) != 0) {
277 goto dealloc_and_fail;
/* Release the temporary header mapping and the namei pathname buffer. */
283 kmem_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
287 NDFREE(&nd, NDF_ONLY_PNBUF);
293 exec_coff_imgact(imgp)
294 struct image_params *imgp;
/*
 * Image activator for iBCS2 COFF executables.  Validates the header,
 * builds a fresh vmspace, loads text/data/bss, pulls in any shared
 * libraries listed in a STYP_LIB section, and installs the iBCS2
 * sysentvec.  Returns 0 on success, -1 if the image is not COFF
 * (letting other activators try), or an errno on failure.
 * NOTE(review): intervening lines are elided in this view; comments
 * below describe only the statements visible here.
 */
296 const struct filehdr *fhdr = (const struct filehdr*)imgp->image_header;
297 const struct aouthdr *ahdr;
298 const struct scnhdr *scns;
300 struct vmspace *vmspace;
303 unsigned long text_offset = 0, text_address = 0, text_size = 0;
304 unsigned long data_offset = 0, data_address = 0, data_size = 0;
305 unsigned long bss_size = 0;
/* Not a COFF executable: decline so another image activator can try. */
308 if (fhdr->f_magic != I386_COFF ||
309 !(fhdr->f_flags & F_EXEC)) {
311 DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
315 nscns = fhdr->f_nscns;
/* Section table must fit in the single header page we were given. */
316 if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
318 * For now, return an error -- need to be able to
319 * read in all of the section structures.
322 DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
326 ahdr = (const struct aouthdr*)
327 ((const char*)(imgp->image_header) + sizeof(struct filehdr));
328 imgp->entry_addr = ahdr->entry;
330 scns = (const struct scnhdr*)
331 ((const char*)(imgp->image_header) + sizeof(struct filehdr) +
332 sizeof(struct aouthdr));
/* exec_new_vmspace() must not be entered with the vnode locked. */
334 VOP_UNLOCK(imgp->vp, 0);
/* Destroy the old address space and build a fresh one for the image. */
336 error = exec_new_vmspace(imgp, &ibcs2_svr3_sysvec);
339 vmspace = imgp->proc->p_vmspace;
/* First pass over the section table: record geometry, load libraries. */
341 for (i = 0; i < nscns; i++) {
343 DPRINTF(("i = %d, s_name = %s, s_vaddr = %08lx, "
344 "s_scnptr = %ld s_size = %lx\n", i, scns[i].s_name,
345 scns[i].s_vaddr, scns[i].s_scnptr, scns[i].s_size));
346 if (scns[i].s_flags & STYP_NOLOAD) {
348 * A section that is not loaded, for whatever
349 * reason. It takes precedence over other flag
353 } else if (scns[i].s_flags & STYP_TEXT) {
354 text_address = scns[i].s_vaddr;
355 text_size = scns[i].s_size;
356 text_offset = scns[i].s_scnptr;
357 } else if (scns[i].s_flags & STYP_DATA) {
359 data_address = scns[i].s_vaddr;
360 data_size = scns[i].s_size;
361 data_offset = scns[i].s_scnptr;
362 } else if (scns[i].s_flags & STYP_BSS) {
364 bss_size = scns[i].s_size;
365 } else if (scns[i].s_flags & STYP_LIB) {
/* Shared-library section: page-align the file offset for mapping. */
367 int foff = trunc_page(scns[i].s_scnptr);
368 int off = scns[i].s_scnptr - foff;
369 int len = round_page(scns[i].s_size + PAGE_SIZE);
/* Map the library-list section into the kernel so we can parse it. */
372 if ((error = vm_mmap(exec_map,
373 (vm_offset_t *) &buf,
/* Build each library path under the iBCS2 emulation root first. */
386 int emul_path_len = strlen(ibcs2_emul_path);
388 libbuf = malloc(MAXPATHLEN + emul_path_len,
390 strcpy(libbuf, ibcs2_emul_path);
/*
 * Walk the library entries; each record stores its next-entry
 * and name offsets in units of 4-byte words.
 */
392 for (j = off; j < scns[i].s_size + off;) {
393 long stroff, nextoff;
396 nextoff = 4 * *(long *)(buf + j);
397 stroff = 4 * *(long *)(buf + j + sizeof(long));
399 libname = buf + j + stroff;
402 DPRINTF(("%s(%d): shared library %s\n",
403 __FILE__, __LINE__, libname));
/* Try the emulation-root path first, then the native path. */
404 strlcpy(&libbuf[emul_path_len], libname, MAXPATHLEN);
405 error = coff_load_file(
406 FIRST_THREAD_IN_PROC(imgp->proc), libbuf);
408 error = coff_load_file(
409 FIRST_THREAD_IN_PROC(imgp->proc),
413 "error %d loading coff shared library %s\n",
/* Release the path buffer and the temporary section mapping. */
418 free(libbuf, M_TEMP);
420 kmem_free_wakeup(exec_map, (vm_offset_t)buf, len);
429 DPRINTF(("%s(%d): load_coff_section(vmspace, "
430 "imgp->vp, %08lx, %08lx, 0x%lx, 0x%lx, 0x%x)\n",
431 __FILE__, __LINE__, text_offset, text_address,
432 text_size, text_size, VM_PROT_READ | VM_PROT_EXECUTE));
/* Map the executable's text segment read/execute. */
433 if ((error = load_coff_section(vmspace, imgp->vp,
435 (caddr_t)(void *)(uintptr_t)text_address,
436 text_size, text_size,
437 VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
438 DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
442 * Map in .data and .bss now
446 DPRINTF(("%s(%d): load_coff_section(vmspace, "
447 "imgp->vp, 0x%08lx, 0x%08lx, 0x%lx, 0x%lx, 0x%x)\n",
448 __FILE__, __LINE__, data_offset, data_address,
449 data_size + bss_size, data_size, VM_PROT_ALL));
/* Data plus zero-filled bss tail, fully read/write. */
450 if ((error = load_coff_section(vmspace, imgp->vp,
452 (caddr_t)(void *)(uintptr_t)data_address,
453 data_size + bss_size, data_size,
454 VM_PROT_ALL)) != 0) {
456 DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
/* Commit: this is a final image and runs under the iBCS2 sysentvec. */
460 imgp->interpreted = 0;
461 imgp->proc->p_sysent = &ibcs2_svr3_sysvec;
/* Record segment geometry (in pages) in the vmspace bookkeeping. */
463 vmspace->vm_tsize = round_page(text_size) >> PAGE_SHIFT;
464 vmspace->vm_dsize = round_page(data_size + bss_size) >> PAGE_SHIFT;
465 vmspace->vm_taddr = (caddr_t)(void *)(uintptr_t)text_address;
466 vmspace->vm_daddr = (caddr_t)(void *)(uintptr_t)data_address;
/* Reserve one page just past the data segment.
 * NOTE(review): presumably a guard/sbrk anchor page — confirm. */
468 hole = trunc_page((vm_offset_t)vmspace->vm_daddr +
469 ctob(vmspace->vm_dsize));
471 DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%jx, PAGE_SIZE, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",
472 __FILE__, __LINE__, (uintmax_t)hole));
473 DPRINTF(("imgact: error = %d\n", error));
475 vm_map_find(&vmspace->vm_map, NULL, 0,
476 (vm_offset_t *)&hole, PAGE_SIZE, VMFS_NO_SPACE,
477 VM_PROT_ALL, VM_PROT_ALL, 0);
478 DPRINTF(("IBCS2: start vm_dsize = 0x%x, vm_daddr = 0x%p end = 0x%p\n",
479 ctob(vmspace->vm_dsize), vmspace->vm_daddr,
480 ctob(vmspace->vm_dsize) + vmspace->vm_daddr ));
481 DPRINTF(("%s(%d): returning %d!\n", __FILE__, __LINE__, error));
/* Re-lock the vnode before returning to the exec machinery. */
484 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
490 * Tell kern_execve.c about it, with a little help from the linker.
/* Register this activator in the execsw table under the name "coff". */
492 static struct execsw coff_execsw = { exec_coff_imgact, "coff" };
493 EXEC_SET(coff, coff_execsw);