/*-
 * Copyright (c) 1994 Sean Eric Fagan
 * Copyright (c) 1994 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <i386/ibcs2/coff.h>
#include <i386/ibcs2/ibcs2_util.h>

MODULE_DEPEND(coff, ibcs2, 1, 1, 1);

extern struct sysentvec ibcs2_svr3_sysvec;

static int coff_load_file(struct thread *td, char *name);
static int exec_coff_imgact(struct image_params *imgp);

static int load_coff_section(struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot);

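/*
 * Map one COFF section from vnode "vp" into the process address space
 * described by "vmspace".  "offset" is the file offset of the section
 * data, "vmaddr" its virtual address, "filsz" the number of bytes backed
 * by the file and "memsz" the size the section occupies in memory; any
 * bytes past "filsz" must read as zeros.
 */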
static int
load_coff_section(struct vmspace *vmspace, struct vnode *vp, vm_offset_t offset,
                  caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
        size_t map_len;
        vm_offset_t map_offset;
        vm_offset_t map_addr;
        int error;
        unsigned char *data_buf = 0;
        size_t copy_len;

        map_offset = trunc_page(offset);
        map_addr = trunc_page((vm_offset_t)vmaddr);

        if (memsz > filsz) {
                /*
                 * We have the stupid situation that
                 * the section is longer than it is on file,
                 * which means it has zero-filled areas, and
                 * we have to work for it.  Stupid iBCS!
                 */
                map_len = trunc_page(offset + filsz) - trunc_page(map_offset);
        } else {
                /*
                 * The only stuff we care about is on disk, and we
                 * don't care if we map in more than is really there.
                 */
                map_len = round_page(offset + filsz) - trunc_page(map_offset);
        }

        DPRINTF(("%s(%d):  vm_mmap(&vmspace->vm_map, &0x%08jx, 0x%x, 0x%x, "
                "VM_PROT_ALL, MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, 0x%x)\n",
                __FILE__, __LINE__, (uintmax_t)map_addr, map_len, prot,
                map_offset));

        if ((error = vm_mmap(&vmspace->vm_map,
                             &map_addr,
                             map_len,
                             prot,
                             VM_PROT_ALL,
                             MAP_PRIVATE | MAP_FIXED,
                             OBJT_VNODE,
                             vp,
                             map_offset)) != 0)
                return error;

        if (memsz == filsz) {
                /* We're done! */
                return 0;
        }

        /*
         * Now we have screwball stuff, to accommodate stupid COFF.
         * We have to map the remaining bit of the file into the kernel's
         * memory map, allocate some anonymous memory, copy that last
         * bit into it, and then we're done. *sigh*
         * For clean-up reasons, we actually map in the file last.
         */

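        /*
         * copy_len is the number of file bytes that fall past the last
         * page boundary of the on-disk data and so must be copied by
         * hand; map_addr/map_len describe the page-aligned anonymous
         * region that holds that tail plus the zero-filled remainder.
         */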
        copy_len = (offset + filsz) - trunc_page(offset + filsz);
        map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
        map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

        DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%08jx,0x%x, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0)\n", __FILE__, __LINE__, (uintmax_t)map_addr, map_len));

        if (map_len != 0) {
                error = vm_map_find(&vmspace->vm_map, NULL, 0, &map_addr,
                    map_len, 0, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
                if (error)
                        return (vm_mmap_to_errno(error));
        }

        if ((error = vm_mmap(exec_map,
                            (vm_offset_t *) &data_buf,
                            PAGE_SIZE,
                            VM_PROT_READ,
                            VM_PROT_READ,
                            0,
                            OBJT_VNODE,
                            vp,
                            trunc_page(offset + filsz))) != 0)
                return error;

        error = copyout(data_buf, (caddr_t) map_addr, copy_len);

        kmap_free_wakeup(exec_map, (vm_offset_t)data_buf, PAGE_SIZE);

        return error;
}

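/*
 * Open the COFF shared library "name", validate it and map its text,
 * data and bss sections into the address space of the current process.
 * Called for each shared library named in an executable's STYP_LIB
 * section.
 */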
static int
coff_load_file(struct thread *td, char *name)
{
        struct proc *p = td->td_proc;
        struct vmspace *vmspace = p->p_vmspace;
        int error;
        struct nameidata nd;
        struct vnode *vp;
        struct vattr attr;
        struct filehdr *fhdr;
        struct aouthdr *ahdr;
        struct scnhdr *scns;
        char *ptr = 0;
        int nscns;
        unsigned long text_offset = 0, text_address = 0, text_size = 0;
        unsigned long data_offset = 0, data_address = 0, data_size = 0;
        unsigned long bss_size = 0;
        int i, writecount;

        NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME,
            UIO_SYSSPACE, name, td);

        error = namei(&nd);
        if (error)
                return error;

        vp = nd.ni_vp;
        if (vp == NULL)
                return ENOEXEC;

        error = VOP_GET_WRITECOUNT(vp, &writecount);
        if (error != 0)
                goto fail;
        if (writecount != 0) {
                error = ETXTBSY;
                goto fail;
        }

        if ((error = VOP_GETATTR(vp, &attr, td->td_ucred)) != 0)
                goto fail;

        if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
            || ((attr.va_mode & 0111) == 0)
            || (attr.va_type != VREG)) {
                error = ENOEXEC;
                goto fail;
        }

        if (attr.va_size == 0) {
                error = ENOEXEC;
                goto fail;
        }

        if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
                goto fail;

        if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0)
                goto fail;

        /*
         * Lose the lock on the vnode. It is no longer needed, and must
         * not be held across the page faults taken below.
         */
        VOP_UNLOCK(vp, 0);

        if ((error = vm_mmap(exec_map,
                            (vm_offset_t *) &ptr,
                            PAGE_SIZE,
                            VM_PROT_READ,
                            VM_PROT_READ,
                            0,
                            OBJT_VNODE,
                            vp,
                            0)) != 0)
                goto unlocked_fail;

        fhdr = (struct filehdr *)ptr;

        if (fhdr->f_magic != I386_COFF) {
                error = ENOEXEC;
                goto dealloc_and_fail;
        }

        nscns = fhdr->f_nscns;

        if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
                /*
                 * XXX -- just fail.  I'm so lazy.
                 */
                error = ENOEXEC;
                goto dealloc_and_fail;
        }

        ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));

        scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
                          + sizeof(struct aouthdr));

        for (i = 0; i < nscns; i++) {
                if (scns[i].s_flags & STYP_NOLOAD)
                        continue;
                else if (scns[i].s_flags & STYP_TEXT) {
                        text_address = scns[i].s_vaddr;
                        text_size = scns[i].s_size;
                        text_offset = scns[i].s_scnptr;
                }
                else if (scns[i].s_flags & STYP_DATA) {
                        data_address = scns[i].s_vaddr;
                        data_size = scns[i].s_size;
                        data_offset = scns[i].s_scnptr;
                } else if (scns[i].s_flags & STYP_BSS) {
                        bss_size = scns[i].s_size;
                }
        }

        if ((error = load_coff_section(vmspace, vp, text_offset,
                                      (caddr_t)(void *)(uintptr_t)text_address,
                                      text_size, text_size,
                                      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
                goto dealloc_and_fail;
        }
        if ((error = load_coff_section(vmspace, vp, data_offset,
                                      (caddr_t)(void *)(uintptr_t)data_address,
                                      data_size + bss_size, data_size,
                                      VM_PROT_ALL)) != 0) {
                goto dealloc_and_fail;
        }

        error = 0;

 dealloc_and_fail:
        kmap_free_wakeup(exec_map, (vm_offset_t)ptr, PAGE_SIZE);
        /* The vnode was already unlocked above; skip the unlock at "fail". */
        goto unlocked_fail;
 fail:
        VOP_UNLOCK(vp, 0);
 unlocked_fail:
        NDFREE(&nd, NDF_ONLY_PNBUF);
        vrele(nd.ni_vp);
        return error;
}

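/*
 * Image activator for iBCS2 COFF executables.  Recognizes the i386 COFF
 * magic, creates a fresh VM space, maps the text, data and bss sections,
 * loads any shared libraries named in STYP_LIB sections and switches the
 * process to the iBCS2 SVR3 syscall vector.
 */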
static int
exec_coff_imgact(struct image_params *imgp)
{
        const struct filehdr *fhdr = (const struct filehdr*)imgp->image_header;
        const struct aouthdr *ahdr;
        const struct scnhdr *scns;
        int i;
        struct vmspace *vmspace;
        int nscns;
        int error;
        unsigned long text_offset = 0, text_address = 0, text_size = 0;
        unsigned long data_offset = 0, data_address = 0, data_size = 0;
        unsigned long bss_size = 0;
        vm_offset_t hole;

        if (fhdr->f_magic != I386_COFF ||
            !(fhdr->f_flags & F_EXEC)) {

                 DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
                 return -1;
        }

        nscns = fhdr->f_nscns;
        if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
                /*
                 * For now, return an error -- need to be able to
                 * read in all of the section structures.
                 */

                DPRINTF(("%s(%d): return -1\n", __FILE__, __LINE__));
                return -1;
        }

        ahdr = (const struct aouthdr*)
               ((const char*)(imgp->image_header) + sizeof(struct filehdr));
        imgp->entry_addr = ahdr->entry;

        scns = (const struct scnhdr*)
               ((const char*)(imgp->image_header) + sizeof(struct filehdr) +
                sizeof(struct aouthdr));

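        /*
         * The vnode lock is dropped across exec_new_vmspace() and the
         * section mapping below; it is re-acquired at the "fail" label
         * before returning to the exec code.
         */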
        VOP_UNLOCK(imgp->vp, 0);

        error = exec_new_vmspace(imgp, &ibcs2_svr3_sysvec);
        if (error)
                goto fail;
        vmspace = imgp->proc->p_vmspace;

        for (i = 0; i < nscns; i++) {

          DPRINTF(("i = %d, s_name = %s, s_vaddr = %08lx, "
                   "s_scnptr = %ld s_size = %lx\n", i, scns[i].s_name,
                   scns[i].s_vaddr, scns[i].s_scnptr, scns[i].s_size));
          if (scns[i].s_flags & STYP_NOLOAD) {
                /*
                 * A section that is not loaded, for whatever
                 * reason.  It takes precedence over other flag
                 * bits...
                 */
                continue;
          } else if (scns[i].s_flags & STYP_TEXT) {
                text_address = scns[i].s_vaddr;
                text_size = scns[i].s_size;
                text_offset = scns[i].s_scnptr;
          } else if (scns[i].s_flags & STYP_DATA) {
                /* .data section */
                data_address = scns[i].s_vaddr;
                data_size = scns[i].s_size;
                data_offset = scns[i].s_scnptr;
          } else if (scns[i].s_flags & STYP_BSS) {
                /* .bss section */
                bss_size = scns[i].s_size;
          } else if (scns[i].s_flags & STYP_LIB) {
                char *buf = 0;
                int foff = trunc_page(scns[i].s_scnptr);
                int off = scns[i].s_scnptr - foff;
                int len = round_page(scns[i].s_size + PAGE_SIZE);
                int j;

                if ((error = vm_mmap(exec_map,
                                    (vm_offset_t *) &buf,
                                    len,
                                    VM_PROT_READ,
                                    VM_PROT_READ,
                                    MAP_SHARED,
                                    OBJT_VNODE,
                                    imgp->vp,
                                    foff)) != 0) {
                        error = ENOEXEC;
                        goto fail;
                }
                if (scns[i].s_size) {
                        char *libbuf;
                        int emul_path_len = strlen(ibcs2_emul_path);

                        libbuf = malloc(MAXPATHLEN + emul_path_len,
                                        M_TEMP, M_WAITOK);
                        strcpy(libbuf, ibcs2_emul_path);

                        /*
                         * Each .lib entry starts with two words: its total
                         * length in words and the word offset of the library
                         * pathname within the entry.  Try each library first
                         * under the emulation tree, then by its raw name.
                         */
                        for (j = off; j < scns[i].s_size + off;) {
                                long stroff, nextoff;
                                char *libname;

                                nextoff = 4 * *(long *)(buf + j);
                                stroff = 4 * *(long *)(buf + j + sizeof(long));

                                libname = buf + j + stroff;
                                j += nextoff;

                                DPRINTF(("%s(%d):  shared library %s\n",
                                         __FILE__, __LINE__, libname));
                                strlcpy(&libbuf[emul_path_len], libname, MAXPATHLEN);
                                error = coff_load_file(
                                    FIRST_THREAD_IN_PROC(imgp->proc), libbuf);
                                if (error)
                                        error = coff_load_file(
                                            FIRST_THREAD_IN_PROC(imgp->proc),
                                            libname);
                                if (error) {
                                        printf(
                                "error %d loading coff shared library %s\n",
                                            error, libname);
                                        break;
                                }
                        }
                        free(libbuf, M_TEMP);
                }
                kmap_free_wakeup(exec_map, (vm_offset_t)buf, len);
                if (error)
                        goto fail;
          }
        }
        /*
         * Map in .text now
         */

        DPRINTF(("%s(%d):  load_coff_section(vmspace, "
                "imgp->vp, %08lx, %08lx, 0x%lx, 0x%lx, 0x%x)\n",
                __FILE__, __LINE__, text_offset, text_address,
                text_size, text_size, VM_PROT_READ | VM_PROT_EXECUTE));
        if ((error = load_coff_section(vmspace, imgp->vp,
                                      text_offset,
                                      (caddr_t)(void *)(uintptr_t)text_address,
                                      text_size, text_size,
                                      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
                DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
                goto fail;
        }
        /*
         * Map in .data and .bss now
         */

        DPRINTF(("%s(%d): load_coff_section(vmspace, "
                "imgp->vp, 0x%08lx, 0x%08lx, 0x%lx, 0x%lx, 0x%x)\n",
                __FILE__, __LINE__, data_offset, data_address,
                data_size + bss_size, data_size, VM_PROT_ALL));
        if ((error = load_coff_section(vmspace, imgp->vp,
                                      data_offset,
                                      (caddr_t)(void *)(uintptr_t)data_address,
                                      data_size + bss_size, data_size,
                                      VM_PROT_ALL)) != 0) {

                DPRINTF(("%s(%d): error = %d\n", __FILE__, __LINE__, error));
                goto fail;
        }

        imgp->interpreted = 0;
        imgp->proc->p_sysent = &ibcs2_svr3_sysvec;

        vmspace->vm_tsize = round_page(text_size) >> PAGE_SHIFT;
        vmspace->vm_dsize = round_page(data_size + bss_size) >> PAGE_SHIFT;
        vmspace->vm_taddr = (caddr_t)(void *)(uintptr_t)text_address;
        vmspace->vm_daddr = (caddr_t)(void *)(uintptr_t)data_address;

        hole = trunc_page((vm_offset_t)vmspace->vm_daddr +
            ctob(vmspace->vm_dsize));

        DPRINTF(("%s(%d): vm_map_find(&vmspace->vm_map, NULL, 0, &0x%jx, PAGE_SIZE, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0)\n",
            __FILE__, __LINE__, (uintmax_t)hole));
        DPRINTF(("imgact: error = %d\n", error));

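        /*
         * Reserve one anonymous page immediately past the end of the data
         * segment; the return value of vm_map_find() is deliberately not
         * checked here.
         */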
        vm_map_find(&vmspace->vm_map, NULL, 0,
            (vm_offset_t *)&hole, PAGE_SIZE, 0, VMFS_NO_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        DPRINTF(("IBCS2: start vm_dsize = 0x%x, vm_daddr = 0x%p end = 0x%p\n",
                ctob(vmspace->vm_dsize), vmspace->vm_daddr,
                ctob(vmspace->vm_dsize) + vmspace->vm_daddr ));
        DPRINTF(("%s(%d):  returning %d!\n", __FILE__, __LINE__, error));

fail:
        vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);

        return (error);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw coff_execsw = { exec_coff_imgact, "coff" };
EXEC_SET(coff, coff_execsw);