/*-
 * Copyright (c) 1994 Sean Eric Fagan
 * Copyright (c) 1994 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <i386/ibcs2/coff.h>
#include <i386/ibcs2/ibcs2_util.h>

MODULE_DEPEND(coff, ibcs2, 1, 1, 1);

extern struct sysentvec ibcs2_svr3_sysvec;

static int coff_load_file(struct thread *td, char *name);
static int exec_coff_imgact(struct image_params *imgp);

static int load_coff_section(struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot);

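/*
 * Map a single COFF section from the vnode `vp' into the process address
 * space at `vmaddr'.  The first `filsz' bytes are backed by the file at
 * file offset `offset'; any remainder up to `memsz' is zero-filled
 * anonymous memory (the .bss-style tail of the section).
 */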
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
                  caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
        size_t map_len;
        vm_offset_t map_offset;
        vm_offset_t map_addr;
        int error;
        unsigned char *data_buf = NULL;
        size_t copy_len;

        map_offset = trunc_page(offset);
        map_addr = trunc_page((vm_offset_t)vmaddr);

        if (memsz > filsz) {
                /*
                 * We have the stupid situation that
                 * the section is longer than it is on file,
                 * which means it has zero-filled areas, and
                 * we have to work for it.  Stupid iBCS!
                 */
                map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
        } else {
                /*
                 * The only stuff we care about is on disk, and we
                 * don't care if we map in more than is really there.
                 */
                map_len = round_page(offset + filsz) - trunc_page(map_offset);
        }

        DPRINTF(("%s(%d):  vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
                "VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
                __FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
                map_offset));
        if ((error = vm_mmap(&vmspace->vm_map,
                             &map_addr,
                             map_len,
                             prot,
                             VM_PROT_ALL,
                             MAP_PRIVATE | MAP_FIXED,
                             OBJT_VNODE,
                             vp,
                             map_offset)) != 0)
                return error;

        if (memsz == filsz) {
                /* We're done! */
                return 0;
        }

        /*
         * Now we have screwball stuff, to accommodate stupid COFF.
         * We have to map the remaining bit of the file into the kernel's
         * memory map, allocate some anonymous memory, copy that last
         * bit into it, and then we're done. *sigh*
         * For clean-up reasons, we actually map in the file last.
         */

        copy_len = (offset + filsz) - trunc_page(offset + filsz);
        map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
        map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

        DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx, "
            "0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",
            __FILE__, __LINE__, (uintmax_t)map_addr, map_len));

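        /*
         * Reserve zero-filled anonymous memory for the portion of the
         * section that extends past the end of the file data
         * (memsz > filsz); this becomes the zero-initialized tail.
         */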
        if (map_len != 0) {
                error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
                    map_len, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
                if (error)
                        return (vm_mmap_to_errno(error));
        }

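        /*
         * Map the last partial page of the file read-only into exec_map
         * so the remaining copy_len bytes can be copied by hand into the
         * anonymous region allocated above.
         */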
        if ((error = vm_mmap(exec_map,
                            (vm_offset_t *) &data_buf,
                            PAGE_SIZE,
                            VM_PROT_READ,
                            VM_PROT_READ,
                            0,
                            OBJT_VNODE,
                            vp,
                            trunc_page(offset + filsz))) != 0)
                return error;

        error = copyout(data_buf, (caddr_t) map_addr, copy_len);

        kmem_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);

        return error;
}

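/*
 * Open the COFF file named by `name' (an iBCS2 shared library pathname)
 * and map its text, data and bss sections into the calling process.
 * Called from exec_coff_imgact() for each entry found in a .lib section.
 */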
static int
coff_load_file(struct thread *td, char *name)
{
        struct proc *p = td->td_proc;
        struct vmspace *vmspace = p->p_vmspace;
        int error;
        struct nameidata nd;
        struct vnode *vp;
        struct vattr attr;
        struct filehdr *fhdr;
        struct aouthdr *ahdr;
        struct scnhdr *scns;
        char *ptr = NULL;
        int nscns;
        unsigned long text_offset = 0, text_address = 0, text_size = 0;
        unsigned long data_offset = 0, data_address = 0, data_size = 0;
        unsigned long bss_size = 0;
        int i;

        NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME,
            UIO_SYSSPACE, name, td);

        error = namei(&nd);
        if (error)
                return error;

        vp = nd.ni_vp;
        if (vp == NULL)
                return ENOEXEC;

        if (vp->v_writecount) {
                error = ETXTBSY;
                goto fail;
        }

        if ((error = VOP_GETATTR(vp, &attr, td->td_ucred)) != 0)
                goto fail;

        if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
            || ((attr.va_mode & 0111) == 0)
            || (attr.va_type != VREG)) {
                error = ENOEXEC;
                goto fail;
        }

        if (attr.va_size == 0) {
                error = ENOEXEC;
                goto fail;
        }

        if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
                goto fail;

        if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0)
                goto fail;

        /*
         * Lose the lock on the vnode. It's no longer needed, and must not
         * exist for the pagefault paging to work below.
         */
        VOP_UNLOCK(vp, 0);

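        /*
         * Map the first page of the file to get at the file header,
         * a.out header and section headers.
         */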
        if ((error = vm_mmap(exec_map,
                            (vm_offset_t *) &ptr,
                            PAGE_SIZE,
                            VM_PROT_READ,
                            VM_PROT_READ,
                            0,
                            OBJT_VNODE,
                            vp,
                            0)) != 0)
                goto unlocked_fail;

        fhdr = (struct filehdr *)ptr;

        if (fhdr->f_magic != I386_COFF) {
                error = ENOEXEC;
                goto dealloc_and_fail;
        }

        nscns = fhdr->f_nscns;

        if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
                /*
                 * XXX -- just fail.  I'm so lazy.
                 */
                error = ENOEXEC;
                goto dealloc_and_fail;
        }

        ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));

        scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
                          + sizeof(struct aouthdr));

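        /*
         * Scan the section headers and record where the text, data and
         * bss sections live in the file and in memory.
         */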
        for (i = 0; i < nscns; i++) {
                if (scns[i].s_flags & STYP_NOLOAD)
                        continue;
                else if (scns[i].s_flags & STYP_TEXT) {
                        text_address = scns[i].s_vaddr;
                        text_size = scns[i].s_size;
                        text_offset = scns[i].s_scnptr;
                } else if (scns[i].s_flags & STYP_DATA) {
                        data_address = scns[i].s_vaddr;
                        data_size = scns[i].s_size;
                        data_offset = scns[i].s_scnptr;
                } else if (scns[i].s_flags & STYP_BSS) {
                        bss_size = scns[i].s_size;
                }
        }

        if ((error = load_coff_section(vmspace, vp, text_offset,
                                      (caddr_t)(void *)(uintptr_t)text_address,
                                      text_size, text_size,
                                      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
                goto dealloc_and_fail;
        }
        if ((error = load_coff_section(vmspace, vp, data_offset,
                                      (caddr_t)(void *)(uintptr_t)data_address,
                                      data_size + bss_size, data_size,
                                      VM_PROT_ALL)) != 0) {
                goto dealloc_and_fail;
        }

        error = 0;

 dealloc_and_fail:
        kmem_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
        /* The vnode was already unlocked above; skip the unlock below. */
        goto unlocked_fail;
 fail:
        VOP_UNLOCK(vp, 0);
 unlocked_fail:
        NDFREE(&nd, NDF_ONLY_PNBUF);
        vrele(nd.ni_vp);
        return error;
}

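/*
 * The COFF image activator proper.  Validates the file header, creates a
 * fresh address space, loads any iBCS2 shared libraries named in a .lib
 * section, maps the text, data and bss sections, and points the process
 * at ibcs2_svr3_sysvec.  Returns -1 when the image is not i386 COFF so
 * that the other image activators get a chance at it.
 */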
static int
exec_coff_imgact(struct image_params *imgp)
{
        const struct filehdr *fhdr = (const struct filehdr*)imgp->image_header;
        const struct aouthdr *ahdr;
        const struct scnhdr *scns;
        int i;
        struct vmspace *vmspace;
        int nscns;
        int error;
        unsigned long text_offset = 0, text_address = 0, text_size = 0;
        unsigned long data_offset = 0, data_address = 0, data_size = 0;
        unsigned long bss_size = 0;
        vm_offset_t hole;

        if (fhdr->f_magic != I386_COFF ||
            !(fhdr->f_flags & F_EXEC)) {
                DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
                return -1;
        }

        nscns = fhdr->f_nscns;
        if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
                /*
                 * For now, return an error -- need to be able to
                 * read in all of the section structures.
                 */
                DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
                return -1;
        }

        ahdr = (const struct aouthdr*)
               ((const char*)(imgp->image_header) + sizeof(struct filehdr));
        imgp->entry_addr = ahdr->entry;

        scns = (const struct scnhdr*)
               ((const char*)(imgp->image_header) + sizeof(struct filehdr) +
                sizeof(struct aouthdr));

        VOP_UNLOCK(imgp->vp, 0);

        error = exec_new_vmspace(imgp, &ibcs2_svr3_sysvec);
        if (error)
                goto fail;
        vmspace = imgp->proc->p_vmspace;

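        /*
         * Walk the section headers: record the text, data and bss
         * geometry, and load any shared libraries listed in a .lib
         * section before mapping the executable itself.
         */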
        for (i = 0; i < nscns; i++) {

          DPRINTF(("i = %d, s_name = %s, s_vaddr = %08lx, "
                   "s_scnptr = %ld s_size = %lx\n", i, scns[i].s_name,
                   scns[i].s_vaddr, scns[i].s_scnptr, scns[i].s_size));
          if (scns[i].s_flags & STYP_NOLOAD) {
                /*
                 * A section that is not loaded, for whatever
                 * reason.  It takes precedence over other flag
                 * bits...
                 */
                continue;
          } else if (scns[i].s_flags & STYP_TEXT) {
                text_address = scns[i].s_vaddr;
                text_size = scns[i].s_size;
                text_offset = scns[i].s_scnptr;
          } else if (scns[i].s_flags & STYP_DATA) {
                /* .data section */
                data_address = scns[i].s_vaddr;
                data_size = scns[i].s_size;
                data_offset = scns[i].s_scnptr;
          } else if (scns[i].s_flags & STYP_BSS) {
                /* .bss section */
                bss_size = scns[i].s_size;
          } else if (scns[i].s_flags & STYP_LIB) {
                char *buf = NULL;
                int foff = trunc_page(scns[i].s_scnptr);
                int off = scns[i].s_scnptr - foff;
                int len = round_page(scns[i].s_size + PAGE_SIZE);
                int j;

                if ((error = vm_mmap(exec_map,
                                    (vm_offset_t *) &buf,
                                    len,
                                    VM_PROT_READ,
                                    VM_PROT_READ,
                                    MAP_SHARED,
                                    OBJT_VNODE,
                                    imgp->vp,
                                    foff)) != 0) {
                        error = ENOEXEC;
                        goto fail;
                }
                if (scns[i].s_size) {
                        char *libbuf;
                        int emul_path_len = strlen(ibcs2_emul_path);

                        libbuf = malloc(MAXPATHLEN + emul_path_len,
                                        M_TEMP, M_WAITOK);
                        strcpy(libbuf, ibcs2_emul_path);

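                        /*
                         * Each .lib record starts with two 32-bit words:
                         * the record length and the offset of the library
                         * pathname, both expressed in words.  Walk the
                         * records and try each library first under
                         * ibcs2_emul_path, then by its literal pathname.
                         */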
                        for (j = off; j < scns[i].s_size + off;) {
                                long stroff, nextoff;
                                char *libname;

                                nextoff = 4 * *(long *)(buf + j);
                                stroff = 4 * *(long *)(buf + j + sizeof(long));

                                libname = buf + j + stroff;
                                j += nextoff;

                                DPRINTF(("%s(%d):  shared library %s\n",
                                         __FILE__, __LINE__, libname));
                                strlcpy(&libbuf[emul_path_len], libname, MAXPATHLEN);
                                error = coff_load_file(
                                    FIRST_THREAD_IN_PROC(imgp->proc), libbuf);
                                if (error)
                                        error = coff_load_file(
                                            FIRST_THREAD_IN_PROC(imgp->proc),
                                            libname);
                                if (error) {
                                        printf(
                                "error %d loading coff shared library %s\n",
                                            error, libname);
                                        break;
                                }
                        }
                        free(libbuf, M_TEMP);
                }
                kmem_free_wakeup(exec_map, (vm_offset_t)buf, len);
                if (error)
                        goto fail;
          }
        }
        /*
         * Map in .text now
         */

        DPRINTF(("%s(%d):  load_coff_section(vmspace, "
                "imgp->vp, %08lx, %08lx, 0x%lx, 0x%lx, 0x%x)\n",
                __FILE__, __LINE__, text_offset, text_address,
                text_size, text_size, VM_PROT_READ | VM_PROT_EXECUTE));
        if ((error = load_coff_section(vmspace, imgp->vp,
                                      text_offset,
                                      (caddr_t)(void *)(uintptr_t)text_address,
                                      text_size, text_size,
                                      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
                DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
                goto fail;
        }
        /*
         * Map in .data and .bss now
         */

        DPRINTF(("%s(%d): load_coff_section(vmspace, "
                "imgp->vp, 0x%08lx, 0x%08lx, 0x%lx, 0x%lx, 0x%x)\n",
                __FILE__, __LINE__, data_offset, data_address,
                data_size + bss_size, data_size, VM_PROT_ALL));
        if ((error = load_coff_section(vmspace, imgp->vp,
                                      data_offset,
                                      (caddr_t)(void *)(uintptr_t)data_address,
                                      data_size + bss_size, data_size,
                                      VM_PROT_ALL)) != 0) {
                DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
                goto fail;
        }

        imgp->interpreted = 0;
        imgp->proc->p_sysent = &ibcs2_svr3_sysvec;

        vmspace->vm_tsize = round_page(text_size) >> PAGE_SHIFT;
        vmspace->vm_dsize = round_page(data_size + bss_size) >> PAGE_SHIFT;
        vmspace->vm_taddr = (caddr_t)(void *)(uintptr_t)text_address;
        vmspace->vm_daddr = (caddr_t)(void *)(uintptr_t)data_address;

        hole = trunc_page((vm_offset_t)vmspace->vm_daddr +
            ctob(vmspace->vm_dsize));

        DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%jx, "
            "PAGE_SIZE, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",
            __FILE__, __LINE__, (uintmax_t)hole));
        DPRINTF(("imgact: error = %d\n", error));

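        /*
         * Allocate a single anonymous page just past the end of the
         * data/bss segment.  The return value of vm_map_find() is not
         * checked; failure to place this page is not treated as fatal.
         */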
        vm_map_find(&vmspace->vm_map, NULL, 0,
            (vm_offset_t *)&hole, PAGE_SIZE, VMFS_NO_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        DPRINTF(("IBCS2: start vm_dsize = 0x%x, vm_daddr = 0x%p end = 0x%p\n",
                ctob(vmspace->vm_dsize), vmspace->vm_daddr,
                ctob(vmspace->vm_dsize) + vmspace->vm_daddr ));
        DPRINTF(("%s(%d):  returning %d!\n", __FILE__, __LINE__, error));

fail:
        vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);

        return (error);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw coff_execsw = { exec_coff_imgact, "coff" };
EXEC_SET(coff, coff_execsw);