1 /*-
2  * Copyright (c) 2000 David O'Brien
3  * Copyright (c) 1995-1996 Søren Schmidt
4  * Copyright (c) 1996 Peter Wemm
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer
12  *    in this position and unchanged.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "opt_compat.h"
35
36 #include <sys/param.h>
37 #include <sys/exec.h>
38 #include <sys/fcntl.h>
39 #include <sys/imgact.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mount.h>
45 #include <sys/mutex.h>
46 #include <sys/mman.h>
47 #include <sys/namei.h>
48 #include <sys/pioctl.h>
49 #include <sys/proc.h>
50 #include <sys/procfs.h>
51 #include <sys/resourcevar.h>
52 #include <sys/sf_buf.h>
53 #include <sys/systm.h>
54 #include <sys/signalvar.h>
55 #include <sys/stat.h>
56 #include <sys/sx.h>
57 #include <sys/syscall.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysent.h>
60 #include <sys/vnode.h>
61
62 #include <vm/vm.h>
63 #include <vm/vm_kern.h>
64 #include <vm/vm_param.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69
70 #include <machine/elf.h>
71 #include <machine/md_var.h>
72
73 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
74 #include <machine/fpu.h>
75 #include <compat/ia32/ia32_reg.h>
76 #endif
77
78 #define OLD_EI_BRAND    8
79
80 static int __elfN(check_header)(const Elf_Ehdr *hdr);
81 static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
82     const char *interp);
83 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
84     u_long *entry, size_t pagesize);
85 static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
86     vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
87     vm_prot_t prot, size_t pagesize);
88 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
89
90 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
91     "");
92
93 int __elfN(fallback_brand) = -1;
94 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
95     fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
96     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
97 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
98     &__elfN(fallback_brand));
99
100 static int elf_trace = 0;
101 SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
102
103 static int elf_legacy_coredump = 0;
104 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW, 
105     &elf_legacy_coredump, 0, "");
106
107 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
108
109 #define trunc_page_ps(va, ps)   ((va) & ~(ps - 1))
110 #define round_page_ps(va, ps)   (((va) + (ps - 1)) & ~(ps - 1))
111 #define aligned(a, t)   (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
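/*
 * For example, with ps = 0x1000: trunc_page_ps(0x12345, 0x1000) == 0x12000
 * and round_page_ps(0x12345, 0x1000) == 0x13000.  The aligned() macro reuses
 * trunc_page_ps() with sizeof(t) as the granule, so it simply tests whether
 * the address is a multiple of sizeof(t).
 */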
112
113 int
114 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
115 {
116         int i;
117
118         for (i = 0; i < MAX_BRANDS; i++) {
119                 if (elf_brand_list[i] == NULL) {
120                         elf_brand_list[i] = entry;
121                         break;
122                 }
123         }
124         if (i == MAX_BRANDS)
125                 return (-1);
126         return (0);
127 }
128
129 int
130 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
131 {
132         int i;
133
134         for (i = 0; i < MAX_BRANDS; i++) {
135                 if (elf_brand_list[i] == entry) {
136                         elf_brand_list[i] = NULL;
137                         break;
138                 }
139         }
140         if (i == MAX_BRANDS)
141                 return (-1);
142         return (0);
143 }
144
145 int
146 __elfN(brand_inuse)(Elf_Brandinfo *entry)
147 {
148         struct proc *p;
149         int rval = FALSE;
150
151         sx_slock(&allproc_lock);
152         FOREACH_PROC_IN_SYSTEM(p) {
153                 if (p->p_sysent == entry->sysvec) {
154                         rval = TRUE;
155                         break;
156                 }
157         }
158         sx_sunlock(&allproc_lock);
159
160         return (rval);
161 }
162
163 static Elf_Brandinfo *
164 __elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
165 {
166         Elf_Brandinfo *bi;
167         int i;
168
169         /*
170          * We support three types of branding -- (1) the ELF EI_OSABI field
171          * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
172          * branding within the ELF header, and (3) a match against the `interp_path'
173          * field.  We should also look for an ".note.ABI-tag" ELF section now
174          * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
175          */
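
/*
 * The checks below are applied in that order: an explicit EI_OSABI or
 * old-style brand string match wins, then a matching interpreter path, and
 * finally the brand selected by the kern.elf<N>.fallback_brand
 * sysctl/tunable declared above.
 */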
176
177         /* If the executable has a brand, search for it in the brand list. */
178         for (i = 0; i < MAX_BRANDS; i++) {
179                 bi = elf_brand_list[i];
180                 if (bi != NULL && hdr->e_machine == bi->machine &&
181                     (hdr->e_ident[EI_OSABI] == bi->brand ||
182                     strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
183                     bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
184                         return (bi);
185         }
186
187         /* Lacking a known brand, search for a recognized interpreter. */
188         if (interp != NULL) {
189                 for (i = 0; i < MAX_BRANDS; i++) {
190                         bi = elf_brand_list[i];
191                         if (bi != NULL && hdr->e_machine == bi->machine &&
192                             strcmp(interp, bi->interp_path) == 0)
193                                 return (bi);
194                 }
195         }
196
197         /* Lacking a recognized interpreter, try the default brand. */
198         for (i = 0; i < MAX_BRANDS; i++) {
199                 bi = elf_brand_list[i];
200                 if (bi != NULL && hdr->e_machine == bi->machine &&
201                     __elfN(fallback_brand) == bi->brand)
202                         return (bi);
203         }
204         return (NULL);
205 }
206
207 static int
208 __elfN(check_header)(const Elf_Ehdr *hdr)
209 {
210         Elf_Brandinfo *bi;
211         int i;
212
213         if (!IS_ELF(*hdr) ||
214             hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
215             hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
216             hdr->e_ident[EI_VERSION] != EV_CURRENT ||
217             hdr->e_phentsize != sizeof(Elf_Phdr) ||
218             hdr->e_version != ELF_TARG_VER)
219                 return (ENOEXEC);
220
221         /*
222          * Make sure we have at least one brand for this machine.
223          */
224
225         for (i = 0; i < MAX_BRANDS; i++) {
226                 bi = elf_brand_list[i];
227                 if (bi != NULL && bi->machine == hdr->e_machine)
228                         break;
229         }
230         if (i == MAX_BRANDS)
231                 return (ENOEXEC);
232
233         return (0);
234 }
235
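/*
 * Helper for map_insert() below: cover the sub-page range [start, end) with
 * an anonymous page (insertion errors are ignored, the page may already
 * exist) and copy the corresponding bytes from the backing object, if any,
 * into place.  Used for the unaligned head and tail of a segment.
 */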
236 static int
237 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
238     vm_offset_t start, vm_offset_t end, vm_prot_t prot)
239 {
240         struct sf_buf *sf;
241         int error;
242         vm_offset_t off;
243
244         /*
245          * Create the page if it doesn't exist yet. Ignore errors.
246          */
247         vm_map_lock(map);
248         vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
249             VM_PROT_ALL, VM_PROT_ALL, 0);
250         vm_map_unlock(map);
251
252         /*
253          * Find the page from the underlying object.
254          */
255         if (object) {
256                 sf = vm_imgact_map_page(object, offset);
257                 if (sf == NULL)
258                         return (KERN_FAILURE);
259                 off = offset - trunc_page(offset);
260                 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
261                     end - start);
262                 vm_imgact_unmap_page(sf);
263                 if (error) {
264                         return (KERN_FAILURE);
265                 }
266         }
267
268         return (KERN_SUCCESS);
269 }
270
271 static int
272 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
273     vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
274 {
275         struct sf_buf *sf;
276         vm_offset_t off;
277         vm_size_t sz;
278         int error, rv;
279
280         if (start != trunc_page(start)) {
281                 rv = __elfN(map_partial)(map, object, offset, start,
282                     round_page(start), prot);
283                 if (rv)
284                         return (rv);
285                 offset += round_page(start) - start;
286                 start = round_page(start);
287         }
288         if (end != round_page(end)) {
289                 rv = __elfN(map_partial)(map, object, offset +
290                     trunc_page(end) - start, trunc_page(end), end, prot);
291                 if (rv)
292                         return (rv);
293                 end = trunc_page(end);
294         }
295         if (end > start) {
296                 if (offset & PAGE_MASK) {
297                         /*
298                          * The mapping is not page aligned. This means we have
299                          * to copy the data. Sigh.
300                          */
301                         rv = vm_map_find(map, NULL, 0, &start, end - start,
302                             FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
303                         if (rv)
304                                 return (rv);
305                         if (object == NULL)
306                                 return (KERN_SUCCESS);
307                         for (; start < end; start += sz) {
308                                 sf = vm_imgact_map_page(object, offset);
309                                 if (sf == NULL)
310                                         return (KERN_FAILURE);
311                                 off = offset - trunc_page(offset);
312                                 sz = end - start;
313                                 if (sz > PAGE_SIZE - off)
314                                         sz = PAGE_SIZE - off;
315                                 error = copyout((caddr_t)sf_buf_kva(sf) + off,
316                                     (caddr_t)start, sz);
317                                 vm_imgact_unmap_page(sf);
318                                 if (error) {
319                                         return (KERN_FAILURE);
320                                 }
321                                 offset += sz;
322                         }
323                         rv = KERN_SUCCESS;
324                 } else {
325                         vm_object_reference(object);
326                         vm_map_lock(map);
327                         rv = vm_map_insert(map, object, offset, start, end,
328                             prot, VM_PROT_ALL, cow);
329                         vm_map_unlock(map);
330                         if (rv != KERN_SUCCESS)
331                                 vm_object_deallocate(object);
332                 }
333                 return (rv);
334         } else {
335                 return (KERN_SUCCESS);
336         }
337 }
338
339 static int
340 __elfN(load_section)(struct vmspace *vmspace,
341         vm_object_t object, vm_offset_t offset,
342         caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
343         size_t pagesize)
344 {
345         struct sf_buf *sf;
346         size_t map_len;
347         vm_offset_t map_addr;
348         int error, rv, cow;
349         size_t copy_len;
350         vm_offset_t file_addr;
351
352         /*
353          * It's necessary to fail if the filsz + offset taken from the
354          * header is greater than the actual file pager object's size.
355          * If we were to allow this, then the vm_map_find() below would
356          * walk right off the end of the file object and into the ether.
357          *
358          * While I'm here, might as well check for something else that
359          * is invalid: filsz cannot be greater than memsz.
360          */
361         if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
362             filsz > memsz) {
363                 uprintf("elf_load_section: truncated ELF file\n");
364                 return (ENOEXEC);
365         }
366
367         map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
368         file_addr = trunc_page_ps(offset, pagesize);
369
370         /*
371          * We have two choices.  We can either clear the data in the last page
372          * of an oversized mapping, or we can start the anon mapping a page
373          * early and copy the initialized data into that first page.  We
374          * choose the second.
375          */
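        /*
         * A concrete illustration with made-up numbers: pagesize 0x1000,
         * offset 0x2000, filsz 0x1800, memsz 0x4000.  Then file_addr is
         * 0x2000 and, because memsz > filsz, map_len becomes
         * trunc_page_ps(0x3800, 0x1000) - 0x2000 = 0x1000: only the fully
         * file-backed page is mapped from the object; the trailing 0x800
         * file bytes are copied into the anon mapping further down.
         */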
376         if (memsz > filsz)
377                 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
378         else
379                 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
380
381         if (map_len != 0) {
382                 /* cow flags: don't dump readonly sections in core */
383                 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
384                     (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
385
386                 rv = __elfN(map_insert)(&vmspace->vm_map,
387                                       object,
388                                       file_addr,        /* file offset */
389                                       map_addr,         /* virtual start */
390                                       map_addr + map_len,/* virtual end */
391                                       prot,
392                                       cow);
393                 if (rv != KERN_SUCCESS)
394                         return (EINVAL);
395
396                 /* we can stop now if we've covered it all */
397                 if (memsz == filsz) {
398                         return (0);
399                 }
400         }
401
402
403         /*
404          * We have to get the remaining bit of the file into the first part
405          * of the oversized map segment.  This is normally because the .data
406          * segment in the file is extended to provide bss.  It's a neat idea
407          * to try and save a page, but it's a pain in the behind to implement.
408          */
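        /*
         * Continuing the made-up numbers above: copy_len = 0x3800 - 0x3000 =
         * 0x800, and with a page-aligned vmaddr of 0x400000 the anon region
         * spans map_addr = 0x401000 to 0x404000.  Its first 0x800 bytes are
         * filled from the file below; the remainder stays zeroed as bss.
         */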
409         copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
410         map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
411         map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
412             map_addr;
413
414         /* This had damn well better be true! */
415         if (map_len != 0) {
416                 rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
417                     map_addr + map_len, VM_PROT_ALL, 0);
418                 if (rv != KERN_SUCCESS) {
419                         return (EINVAL);
420                 }
421         }
422
423         if (copy_len != 0) {
424                 vm_offset_t off;
425
426                 sf = vm_imgact_map_page(object, offset + filsz);
427                 if (sf == NULL)
428                         return (EIO);
429
430                 /* send the page fragment to user space */
431                 off = trunc_page_ps(offset + filsz, pagesize) -
432                     trunc_page(offset + filsz);
433                 error = copyout((caddr_t)sf_buf_kva(sf) + off,
434                     (caddr_t)map_addr, copy_len);
435                 vm_imgact_unmap_page(sf);
436                 if (error) {
437                         return (error);
438                 }
439         }
440
441         /*
442          * set it to the specified protection.
443          * XXX had better undo the damage from pasting over the cracks here!
444          */
445         vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
446             round_page(map_addr + map_len),  prot, FALSE);
447
448         return (0);
449 }
450
451 /*
452  * Load the file "file" into memory.  It may be either a shared object
453  * or an executable.
454  *
455  * The "addr" reference parameter is in/out.  On entry, it specifies
456  * the address where a shared object should be loaded.  If the file is
457  * an executable, this value is ignored.  On exit, "addr" specifies
458  * where the file was actually loaded.
459  *
460  * The "entry" reference parameter is out only.  On exit, it specifies
461  * the entry point for the loaded file.
462  */
463 static int
464 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
465         u_long *entry, size_t pagesize)
466 {
467         struct {
468                 struct nameidata nd;
469                 struct vattr attr;
470                 struct image_params image_params;
471         } *tempdata;
472         const Elf_Ehdr *hdr = NULL;
473         const Elf_Phdr *phdr = NULL;
474         struct nameidata *nd;
475         struct vmspace *vmspace = p->p_vmspace;
476         struct vattr *attr;
477         struct image_params *imgp;
478         vm_prot_t prot;
479         u_long rbase;
480         u_long base_addr = 0;
481         int vfslocked, error, i, numsegs;
482
483         if (curthread->td_proc != p)
484                 panic("elf_load_file - thread");        /* XXXKSE DIAGNOSTIC */
485
486         tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
487         nd = &tempdata->nd;
488         attr = &tempdata->attr;
489         imgp = &tempdata->image_params;
490
491         /*
492          * Initialize part of the common data
493          */
494         imgp->proc = p;
495         imgp->attr = attr;
496         imgp->firstpage = NULL;
497         imgp->image_header = NULL;
498         imgp->object = NULL;
499         imgp->execlabel = NULL;
500
501         /* XXXKSE */
502         NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
503             curthread);
504         vfslocked = 0;
505         if ((error = namei(nd)) != 0) {
506                 nd->ni_vp = NULL;
507                 goto fail;
508         }
509         vfslocked = NDHASGIANT(nd);
510         NDFREE(nd, NDF_ONLY_PNBUF);
511         imgp->vp = nd->ni_vp;
512
513         /*
514          * Check permissions, modes, uid, etc on the file, and "open" it.
515          */
516         error = exec_check_permissions(imgp);
517         if (error)
518                 goto fail;
519
520         error = exec_map_first_page(imgp);
521         if (error)
522                 goto fail;
523
524         /*
525          * Also make certain that the interpreter stays the same, so set
526          * its VV_TEXT flag, too.
527          */
528         nd->ni_vp->v_vflag |= VV_TEXT;
529
530         imgp->object = nd->ni_vp->v_object;
531
532         hdr = (const Elf_Ehdr *)imgp->image_header;
533         if ((error = __elfN(check_header)(hdr)) != 0)
534                 goto fail;
535         if (hdr->e_type == ET_DYN)
536                 rbase = *addr;
537         else if (hdr->e_type == ET_EXEC)
538                 rbase = 0;
539         else {
540                 error = ENOEXEC;
541                 goto fail;
542         }
543
544         /* Only support headers that fit within the first page for now  */
545         /*    (multiplication of two Elf_Half fields will not overflow) */
546         if ((hdr->e_phoff > PAGE_SIZE) ||
547             (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
548                 error = ENOEXEC;
549                 goto fail;
550         }
551
552         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
553         if (!aligned(phdr, Elf_Addr)) {
554                 error = ENOEXEC;
555                 goto fail;
556         }
557
558         for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
559                 if (phdr[i].p_type == PT_LOAD) {        /* Loadable segment */
560                         prot = 0;
561                         if (phdr[i].p_flags & PF_X)
562                                 prot |= VM_PROT_EXECUTE;
563                         if (phdr[i].p_flags & PF_W)
564                                 prot |= VM_PROT_WRITE;
565                         if (phdr[i].p_flags & PF_R)
566                                 prot |= VM_PROT_READ;
567
568                         if ((error = __elfN(load_section)(vmspace,
569                             imgp->object, phdr[i].p_offset,
570                             (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
571                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
572                             pagesize)) != 0)
573                                 goto fail;
574                         /*
575                          * Establish the base address if this is the
576                          * first segment.
577                          */
578                         if (numsegs == 0)
579                                 base_addr = trunc_page(phdr[i].p_vaddr +
580                                     rbase);
581                         numsegs++;
582                 }
583         }
584         *addr = base_addr;
585         *entry = (unsigned long)hdr->e_entry + rbase;
586
587 fail:
588         if (imgp->firstpage)
589                 exec_unmap_first_page(imgp);
590
591         if (nd->ni_vp)
592                 vput(nd->ni_vp);
593
594         VFS_UNLOCK_GIANT(vfslocked);
595         free(tempdata, M_TEMP);
596
597         return (error);
598 }
599
600 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
601
602 static int
603 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
604 {
605         const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
606         const Elf_Phdr *phdr, *pnote = NULL;
607         Elf_Auxargs *elf_auxargs;
608         struct vmspace *vmspace;
609         vm_prot_t prot;
610         u_long text_size = 0, data_size = 0, total_size = 0;
611         u_long text_addr = 0, data_addr = 0;
612         u_long seg_size, seg_addr;
613         u_long addr, entry = 0, proghdr = 0;
614         int error = 0, i;
615         const char *interp = NULL, *newinterp = NULL;
616         Elf_Brandinfo *brand_info;
617         const Elf_Note *note, *note_end;
618         char *path;
619         const char *note_name;
620         struct sysentvec *sv;
621
622         /*
623          * Do we have a valid ELF header ?
624          *
625          * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
626          * if the particular brand doesn't support it.
627          */
628         if (__elfN(check_header)(hdr) != 0 ||
629             (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
630                 return (-1);
631
632         /*
633          * From here on down, we return an errno, not -1, as we've
634          * detected an ELF file.
635          */
636
637         if ((hdr->e_phoff > PAGE_SIZE) ||
638             (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
639                 /* Only support headers in the first page for now */
640                 return (ENOEXEC);
641         }
642         phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
643         if (!aligned(phdr, Elf_Addr))
644                 return (ENOEXEC);
645         for (i = 0; i < hdr->e_phnum; i++) {
646                 if (phdr[i].p_type == PT_INTERP) {
647                         /* Path to interpreter */
648                         if (phdr[i].p_filesz > MAXPATHLEN ||
649                             phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
650                                 return (ENOEXEC);
651                         interp = imgp->image_header + phdr[i].p_offset;
652                         break;
653                 }
654         }
655
656         brand_info = __elfN(get_brandinfo)(hdr, interp);
657         if (brand_info == NULL) {
658                 uprintf("ELF binary type \"%u\" not known.\n",
659                     hdr->e_ident[EI_OSABI]);
660                 return (ENOEXEC);
661         }
662         if (hdr->e_type == ET_DYN &&
663             (brand_info->flags & BI_CAN_EXEC_DYN) == 0)
664                 return (ENOEXEC);
665         sv = brand_info->sysvec;
666         if (interp != NULL && brand_info->interp_newpath != NULL)
667                 newinterp = brand_info->interp_newpath;
668
669         /*
670          * Avoid a possible deadlock if the current address space is destroyed
671          * and that address space maps the locked vnode.  In the common case,
672          * the locked vnode's v_usecount is decremented but remains greater
673          * than zero.  Consequently, the vnode lock is not needed by vrele().
674          * However, in cases where the vnode lock is external, such as nullfs,
675          * v_usecount may become zero.
676          */
677         VOP_UNLOCK(imgp->vp, 0);
678
679         error = exec_new_vmspace(imgp, sv);
680         imgp->proc->p_sysent = sv;
681
682         vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
683         if (error)
684                 return (error);
685
686         vmspace = imgp->proc->p_vmspace;
687
688         for (i = 0; i < hdr->e_phnum; i++) {
689                 switch (phdr[i].p_type) {
690                 case PT_LOAD:   /* Loadable segment */
691                         prot = 0;
692                         if (phdr[i].p_flags & PF_X)
693                                 prot |= VM_PROT_EXECUTE;
694                         if (phdr[i].p_flags & PF_W)
695                                 prot |= VM_PROT_WRITE;
696                         if (phdr[i].p_flags & PF_R)
697                                 prot |= VM_PROT_READ;
698
699 #if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
700                         /*
701                          * Some x86 binaries assume read == executable,
702                          * notably the M3 runtime and therefore cvsup
703                          */
704                         if (prot & VM_PROT_READ)
705                                 prot |= VM_PROT_EXECUTE;
706 #endif
707
708                         if ((error = __elfN(load_section)(vmspace,
709                             imgp->object, phdr[i].p_offset,
710                             (caddr_t)(uintptr_t)phdr[i].p_vaddr,
711                             phdr[i].p_memsz, phdr[i].p_filesz, prot,
712                             sv->sv_pagesize)) != 0)
713                                 return (error);
714
715                         /*
716                          * If this segment contains the program headers,
717                          * remember their virtual address for the AT_PHDR
718                          * aux entry. Static binaries don't usually include
719                          * a PT_PHDR entry.
720                          */
721                         if (phdr[i].p_offset == 0 &&
722                             hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
723                                 <= phdr[i].p_filesz)
724                                 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
725
726                         seg_addr = trunc_page(phdr[i].p_vaddr);
727                         seg_size = round_page(phdr[i].p_memsz +
728                             phdr[i].p_vaddr - seg_addr);
729
730                         /*
731                          * Is this .text or .data?  We can't use
732                          * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
733                          * alpha terribly and possibly does other bad
734                          * things so we stick to the old way of figuring
735                          * it out:  If the segment contains the program
736                          * entry point, it's a text segment, otherwise it
737                          * is a data segment.
738                          *
739                          * Note that obreak() assumes that data_addr + 
740                          * data_size == end of data load area, and the ELF
741                          * file format expects segments to be sorted by
742                          * address.  If multiple data segments exist, the
743                          * last one will be used.
744                          */
745                         if (hdr->e_entry >= phdr[i].p_vaddr &&
746                             hdr->e_entry < (phdr[i].p_vaddr +
747                             phdr[i].p_memsz)) {
748                                 text_size = seg_size;
749                                 text_addr = seg_addr;
750                                 entry = (u_long)hdr->e_entry;
751                         } else {
752                                 data_size = seg_size;
753                                 data_addr = seg_addr;
754                         }
755                         total_size += seg_size;
756                         break;
757                 case PT_PHDR:   /* Program header table info */
758                         proghdr = phdr[i].p_vaddr;
759                         break;
760                 case PT_NOTE:
761                         pnote = &phdr[i];
762                         break;
763                 default:
764                         break;
765                 }
766         }
767         
768         if (data_addr == 0 && data_size == 0) {
769                 data_addr = text_addr;
770                 data_size = text_size;
771         }
772
773         /*
774          * Check limits.  It should be safe to check the
775          * limits after loading the segments since we do
776          * not actually fault in all the segments pages.
777          */
778         PROC_LOCK(imgp->proc);
779         if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
780             text_size > maxtsiz ||
781             total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
782                 PROC_UNLOCK(imgp->proc);
783                 return (ENOMEM);
784         }
785
786         vmspace->vm_tsize = text_size >> PAGE_SHIFT;
787         vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
788         vmspace->vm_dsize = data_size >> PAGE_SHIFT;
789         vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
790
791         /*
792          * We load the dynamic linker where a userland call
793          * to mmap(0, ...) would put it.  The rationale behind this
794          * calculation is that it leaves room for the heap to grow to
795          * its maximum allowed size.
796          */
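        /*
         * For instance, with the hard RLIMIT_DATA limit left at its usual
         * MAXDSIZ default, the interpreter base lands MAXDSIZ bytes above
         * vm_daddr, i.e. just past the highest address the data segment
         * could ever reach through brk()/sbrk().
         */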
797         addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
798             lim_max(imgp->proc, RLIMIT_DATA));
799         PROC_UNLOCK(imgp->proc);
800
801         imgp->entry_addr = entry;
802
803         if (interp != NULL) {
804                 int have_interp = FALSE;
805                 VOP_UNLOCK(imgp->vp, 0);
806                 if (brand_info->emul_path != NULL &&
807                     brand_info->emul_path[0] != '\0') {
808                         path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
809                         snprintf(path, MAXPATHLEN, "%s%s",
810                             brand_info->emul_path, interp);
811                         error = __elfN(load_file)(imgp->proc, path, &addr,
812                             &imgp->entry_addr, sv->sv_pagesize);
813                         free(path, M_TEMP);
814                         if (error == 0)
815                                 have_interp = TRUE;
816                 }
817                 if (!have_interp && newinterp != NULL) {
818                         error = __elfN(load_file)(imgp->proc, newinterp, &addr,
819                             &imgp->entry_addr, sv->sv_pagesize);
820                         have_interp = TRUE;
821                 }
822                 if (!have_interp) {
823                         error = __elfN(load_file)(imgp->proc, interp, &addr,
824                             &imgp->entry_addr, sv->sv_pagesize);
825                 }
826                 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
827                 if (error != 0) {
828                         uprintf("ELF interpreter %s not found\n", interp);
829                         return (error);
830                 }
831         }
832
833         /*
834          * Construct auxargs table (used by the fixup routine)
835          */
836         elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
837         elf_auxargs->execfd = -1;
838         elf_auxargs->phdr = proghdr;
839         elf_auxargs->phent = hdr->e_phentsize;
840         elf_auxargs->phnum = hdr->e_phnum;
841         elf_auxargs->pagesz = PAGE_SIZE;
842         elf_auxargs->base = addr;
843         elf_auxargs->flags = 0;
844         elf_auxargs->entry = entry;
845         elf_auxargs->trace = elf_trace;
846
847         imgp->auxargs = elf_auxargs;
848         imgp->interpreted = 0;
849
850         /*
851          * Try to fetch the osreldate for a FreeBSD binary from the ELF
852          * OSABI-note. Only the first page of the image is searched,
853          * the same as for headers.
854          */
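        /*
         * The record looked for is an ordinary ELF note: a header of three
         * 32-bit words (n_namesz, n_descsz, n_type) followed by the name and
         * the descriptor, each padded to a 4-byte boundary.  For the FreeBSD
         * ABI tag the name is "FreeBSD\0", the type is 1 and the descriptor
         * is a single 32-bit osreldate value.
         */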
855         if (pnote != NULL && pnote->p_offset < PAGE_SIZE &&
856             pnote->p_offset + pnote->p_filesz < PAGE_SIZE) {
857                 note = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
858                 if (!aligned(note, Elf32_Addr)) {
859                         free(imgp->auxargs, M_TEMP);
860                         imgp->auxargs = NULL;
861                         return (ENOEXEC);
862                 }
863                 note_end = (const Elf_Note *)(imgp->image_header + pnote->p_offset +
864                     pnote->p_filesz);
865                 while (note < note_end) {
866                         if (note->n_namesz == sizeof(FREEBSD_ABI_VENDOR) &&
867                             note->n_descsz == sizeof(int32_t) &&
868                             note->n_type == 1 /* ABI_NOTETYPE */) {
869                                 note_name = (const char *)(note + 1);
870                                 if (strncmp(FREEBSD_ABI_VENDOR, note_name,
871                                     sizeof(FREEBSD_ABI_VENDOR)) == 0) {
872                                         imgp->proc->p_osrel = *(const int32_t *)
873                                             (note_name +
874                                             round_page_ps(sizeof(FREEBSD_ABI_VENDOR),
875                                                 sizeof(Elf32_Addr)));
876                                         break;
877                                 }
878                         }
879                         note = (const Elf_Note *)((const char *)(note + 1) +
880                             round_page_ps(note->n_namesz, sizeof(Elf32_Addr)) +
881                             round_page_ps(note->n_descsz, sizeof(Elf32_Addr)));
882                 }
883         }
884
885         return (error);
886 }
887
888 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
889
890 int
891 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
892 {
893         Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
894         Elf_Addr *base;
895         Elf_Addr *pos;
896
897         base = (Elf_Addr *)*stack_base;
898         pos = base + (imgp->args->argc + imgp->args->envc + 2);
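        /*
         * The "+ 2" skips the NULL pointers that terminate the argv and envp
         * arrays, so the auxiliary vector entries written below start right
         * after the environment's terminating NULL.
         */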
899
900         if (args->trace) {
901                 AUXARGS_ENTRY(pos, AT_DEBUG, 1);
902         }
903         if (args->execfd != -1) {
904                 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
905         }
906         AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
907         AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
908         AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
909         AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
910         AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
911         AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
912         AUXARGS_ENTRY(pos, AT_BASE, args->base);
913         AUXARGS_ENTRY(pos, AT_NULL, 0);
914
915         free(imgp->auxargs, M_TEMP);
916         imgp->auxargs = NULL;
917
918         base--;
919         suword(base, (long)imgp->args->argc);
920         *stack_base = (register_t *)base;
921         return (0);
922 }
923
924 /*
925  * Code for generating ELF core dumps.
926  */
927
928 typedef void (*segment_callback)(vm_map_entry_t, void *);
929
930 /* Closure for cb_put_phdr(). */
931 struct phdr_closure {
932         Elf_Phdr *phdr;         /* Program header to fill in */
933         Elf_Off offset;         /* Offset of segment in core file */
934 };
935
936 /* Closure for cb_size_segment(). */
937 struct sseg_closure {
938         int count;              /* Count of writable segments. */
939         size_t size;            /* Total size of all writable segments. */
940 };
941
942 static void cb_put_phdr(vm_map_entry_t, void *);
943 static void cb_size_segment(vm_map_entry_t, void *);
944 static void each_writable_segment(struct thread *, segment_callback, void *);
945 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
946     int, void *, size_t);
947 static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
948 static void __elfN(putnote)(void *, size_t *, const char *, int,
949     const void *, size_t);
950
951 int
952 __elfN(coredump)(td, vp, limit)
953         struct thread *td;
954         struct vnode *vp;
955         off_t limit;
956 {
957         struct ucred *cred = td->td_ucred;
958         int error = 0;
959         struct sseg_closure seginfo;
960         void *hdr;
961         size_t hdrsize;
962
963         /* Size the program segments. */
964         seginfo.count = 0;
965         seginfo.size = 0;
966         each_writable_segment(td, cb_size_segment, &seginfo);
967
968         /*
969          * Calculate the size of the core file header area by making
970          * a dry run of generating it.  Nothing is written, but the
971          * size is calculated.
972          */
973         hdrsize = 0;
974         __elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
975
976         if (hdrsize + seginfo.size >= limit)
977                 return (EFAULT);
978
979         /*
980          * Allocate memory for building the header, fill it up,
981          * and write it out.
982          */
983         hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
984         if (hdr == NULL) {
985                 return (EINVAL);
986         }
987         error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
988
989         /* Write the contents of all of the writable segments. */
990         if (error == 0) {
991                 Elf_Phdr *php;
992                 off_t offset;
993                 int i;
994
995                 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
996                 offset = hdrsize;
997                 for (i = 0; i < seginfo.count; i++) {
998                         error = vn_rdwr_inchunks(UIO_WRITE, vp,
999                             (caddr_t)(uintptr_t)php->p_vaddr,
1000                             php->p_filesz, offset, UIO_USERSPACE,
1001                             IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1002                             curthread); /* XXXKSE */
1003                         if (error != 0)
1004                                 break;
1005                         offset += php->p_filesz;
1006                         php++;
1007                 }
1008         }
1009         free(hdr, M_TEMP);
1010
1011         return (error);
1012 }
1013
1014 /*
1015  * A callback for each_writable_segment() to write out the segment's
1016  * program header entry.
1017  */
1018 static void
1019 cb_put_phdr(entry, closure)
1020         vm_map_entry_t entry;
1021         void *closure;
1022 {
1023         struct phdr_closure *phc = (struct phdr_closure *)closure;
1024         Elf_Phdr *phdr = phc->phdr;
1025
1026         phc->offset = round_page(phc->offset);
1027
1028         phdr->p_type = PT_LOAD;
1029         phdr->p_offset = phc->offset;
1030         phdr->p_vaddr = entry->start;
1031         phdr->p_paddr = 0;
1032         phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1033         phdr->p_align = PAGE_SIZE;
1034         phdr->p_flags = 0;
1035         if (entry->protection & VM_PROT_READ)
1036                 phdr->p_flags |= PF_R;
1037         if (entry->protection & VM_PROT_WRITE)
1038                 phdr->p_flags |= PF_W;
1039         if (entry->protection & VM_PROT_EXECUTE)
1040                 phdr->p_flags |= PF_X;
1041
1042         phc->offset += phdr->p_filesz;
1043         phc->phdr++;
1044 }
1045
1046 /*
1047  * A callback for each_writable_segment() to gather information about
1048  * the number of segments and their total size.
1049  */
1050 static void
1051 cb_size_segment(entry, closure)
1052         vm_map_entry_t entry;
1053         void *closure;
1054 {
1055         struct sseg_closure *ssc = (struct sseg_closure *)closure;
1056
1057         ssc->count++;
1058         ssc->size += entry->end - entry->start;
1059 }
1060
1061 /*
1062  * For each writable segment in the process's memory map, call the given
1063  * function with a pointer to the map entry and some arbitrary
1064  * caller-supplied data.
1065  */
1066 static void
1067 each_writable_segment(td, func, closure)
1068         struct thread *td;
1069         segment_callback func;
1070         void *closure;
1071 {
1072         struct proc *p = td->td_proc;
1073         vm_map_t map = &p->p_vmspace->vm_map;
1074         vm_map_entry_t entry;
1075         vm_object_t backing_object, object;
1076         boolean_t ignore_entry;
1077
1078         vm_map_lock_read(map);
1079         for (entry = map->header.next; entry != &map->header;
1080             entry = entry->next) {
1081                 /*
1082                  * Don't dump inaccessible mappings, deal with legacy
1083                  * coredump mode.
1084                  *
1085                  * Note that read-only segments related to the ELF binary
1086                  * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1087                  * need to arbitrarily ignore such segments.
1088                  */
1089                 if (elf_legacy_coredump) {
1090                         if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1091                                 continue;
1092                 } else {
1093                         if ((entry->protection & VM_PROT_ALL) == 0)
1094                                 continue;
1095                 }
1096
1097                 /*
1098                  * Don't include the memory segment in the coredump if
1099                  * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1100                  * madvise(2).  Do not dump submaps (i.e. parts of the
1101                  * kernel map).
1102                  */
1103                 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1104                         continue;
1105
1106                 if ((object = entry->object.vm_object) == NULL)
1107                         continue;
1108
1109                 /* Ignore memory-mapped devices and such things. */
1110                 VM_OBJECT_LOCK(object);
1111                 while ((backing_object = object->backing_object) != NULL) {
1112                         VM_OBJECT_LOCK(backing_object);
1113                         VM_OBJECT_UNLOCK(object);
1114                         object = backing_object;
1115                 }
1116                 ignore_entry = object->type != OBJT_DEFAULT &&
1117                     object->type != OBJT_SWAP && object->type != OBJT_VNODE;
1118                 VM_OBJECT_UNLOCK(object);
1119                 if (ignore_entry)
1120                         continue;
1121
1122                 (*func)(entry, closure);
1123         }
1124         vm_map_unlock_read(map);
1125 }
1126
1127 /*
1128  * Write the core file header to the file, including padding up to
1129  * the page boundary.
1130  */
1131 static int
1132 __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
1133         struct thread *td;
1134         struct vnode *vp;
1135         struct ucred *cred;
1136         int numsegs;
1137         size_t hdrsize;
1138         void *hdr;
1139 {
1140         size_t off;
1141
1142         /* Fill in the header. */
1143         bzero(hdr, hdrsize);
1144         off = 0;
1145         __elfN(puthdr)(td, hdr, &off, numsegs);
1146
1147         /* Write it to the core file. */
1148         return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
1149             UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
1150             td)); /* XXXKSE */
1151 }
1152
1153 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1154 typedef struct prstatus32 elf_prstatus_t;
1155 typedef struct prpsinfo32 elf_prpsinfo_t;
1156 typedef struct fpreg32 elf_prfpregset_t;
1157 typedef struct fpreg32 elf_fpregset_t;
1158 typedef struct reg32 elf_gregset_t;
1159 #else
1160 typedef prstatus_t elf_prstatus_t;
1161 typedef prpsinfo_t elf_prpsinfo_t;
1162 typedef prfpregset_t elf_prfpregset_t;
1163 typedef prfpregset_t elf_fpregset_t;
1164 typedef gregset_t elf_gregset_t;
1165 #endif
1166
1167 static void
1168 __elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
1169 {
1170         struct {
1171                 elf_prstatus_t status;
1172                 elf_prfpregset_t fpregset;
1173                 elf_prpsinfo_t psinfo;
1174         } *tempdata;
1175         elf_prstatus_t *status;
1176         elf_prfpregset_t *fpregset;
1177         elf_prpsinfo_t *psinfo;
1178         struct proc *p;
1179         struct thread *thr;
1180         size_t ehoff, noteoff, notesz, phoff;
1181
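        /*
         * Rough layout being built, with *off tracking the running offset:
         * the ELF header at ehoff, then numsegs + 1 program headers at phoff
         * (the extra one describes the PT_NOTE segment), then the notes at
         * noteoff, and finally the memory segments themselves, page-aligned,
         * after this header area.
         */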
1182         p = td->td_proc;
1183
1184         ehoff = *off;
1185         *off += sizeof(Elf_Ehdr);
1186
1187         phoff = *off;
1188         *off += (numsegs + 1) * sizeof(Elf_Phdr);
1189
1190         noteoff = *off;
1191         /*
1192          * Don't allocate space for the notes if we're just calculating
1193          * the size of the header. We also don't collect the data.
1194          */
1195         if (dst != NULL) {
1196                 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
1197                 status = &tempdata->status;
1198                 fpregset = &tempdata->fpregset;
1199                 psinfo = &tempdata->psinfo;
1200         } else {
1201                 tempdata = NULL;
1202                 status = NULL;
1203                 fpregset = NULL;
1204                 psinfo = NULL;
1205         }
1206
1207         if (dst != NULL) {
1208                 psinfo->pr_version = PRPSINFO_VERSION;
1209                 psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1210                 strlcpy(psinfo->pr_fname, td->td_name, sizeof(psinfo->pr_fname));
1211                 /*
1212                  * XXX - We don't fill in the command line arguments properly
1213                  * yet.
1214                  */
1215                 strlcpy(psinfo->pr_psargs, td->td_name,
1216                     sizeof(psinfo->pr_psargs));
1217         }
1218         __elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
1219             sizeof *psinfo);
1220
1221         /*
1222          * To have the debugger select the right thread (LWP) as the initial
1223          * thread, we dump the state of the thread passed to us in td first.
1224          * This is the thread that causes the core dump and thus likely to
1225          * be the right thread one wants to have selected in the debugger.
1226          */
1227         thr = td;
1228         while (thr != NULL) {
1229                 if (dst != NULL) {
1230                         status->pr_version = PRSTATUS_VERSION;
1231                         status->pr_statussz = sizeof(elf_prstatus_t);
1232                         status->pr_gregsetsz = sizeof(elf_gregset_t);
1233                         status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1234                         status->pr_osreldate = osreldate;
1235                         status->pr_cursig = p->p_sig;
1236                         status->pr_pid = thr->td_tid;
1237 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1238                         fill_regs32(thr, &status->pr_reg);
1239                         fill_fpregs32(thr, fpregset);
1240 #else
1241                         fill_regs(thr, &status->pr_reg);
1242                         fill_fpregs(thr, fpregset);
1243 #endif
1244                 }
1245                 __elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
1246                     sizeof *status);
1247                 __elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
1248                     sizeof *fpregset);
1249                 /*
1250                  * Allow for MD specific notes, as well as any MD
1251                  * specific preparations for writing MI notes.
1252                  */
1253                 __elfN(dump_thread)(thr, dst, off);
1254
1255                 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1256                     TAILQ_NEXT(thr, td_plist);
1257                 if (thr == td)
1258                         thr = TAILQ_NEXT(thr, td_plist);
1259         }
1260
1261         notesz = *off - noteoff;
1262
1263         if (dst != NULL)
1264                 free(tempdata, M_TEMP);
1265
1266         /* Align up to a page boundary for the program segments. */
1267         *off = round_page(*off);
1268
1269         if (dst != NULL) {
1270                 Elf_Ehdr *ehdr;
1271                 Elf_Phdr *phdr;
1272                 struct phdr_closure phc;
1273
1274                 /*
1275                  * Fill in the ELF header.
1276                  */
1277                 ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
1278                 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1279                 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1280                 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1281                 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1282                 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1283                 ehdr->e_ident[EI_DATA] = ELF_DATA;
1284                 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1285                 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1286                 ehdr->e_ident[EI_ABIVERSION] = 0;
1287                 ehdr->e_ident[EI_PAD] = 0;
1288                 ehdr->e_type = ET_CORE;
1289 #if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
1290                 ehdr->e_machine = EM_386;
1291 #else
1292                 ehdr->e_machine = ELF_ARCH;
1293 #endif
1294                 ehdr->e_version = EV_CURRENT;
1295                 ehdr->e_entry = 0;
1296                 ehdr->e_phoff = phoff;
1297                 ehdr->e_flags = 0;
1298                 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1299                 ehdr->e_phentsize = sizeof(Elf_Phdr);
1300                 ehdr->e_phnum = numsegs + 1;
1301                 ehdr->e_shentsize = sizeof(Elf_Shdr);
1302                 ehdr->e_shnum = 0;
1303                 ehdr->e_shstrndx = SHN_UNDEF;
1304
1305                 /*
1306                  * Fill in the program header entries.
1307                  */
1308                 phdr = (Elf_Phdr *)((char *)dst + phoff);
1309
1310                 /* The note segment. */
1311                 phdr->p_type = PT_NOTE;
1312                 phdr->p_offset = noteoff;
1313                 phdr->p_vaddr = 0;
1314                 phdr->p_paddr = 0;
1315                 phdr->p_filesz = notesz;
1316                 phdr->p_memsz = 0;
1317                 phdr->p_flags = 0;
1318                 phdr->p_align = 0;
1319                 phdr++;
1320
1321                 /* All the writable segments from the program. */
1322                 phc.phdr = phdr;
1323                 phc.offset = *off;
1324                 each_writable_segment(td, cb_put_phdr, &phc);
1325         }
1326 }
1327
1328 static void
1329 __elfN(putnote)(void *dst, size_t *off, const char *name, int type,
1330     const void *desc, size_t descsz)
1331 {
1332         Elf_Note note;
1333
1334         note.n_namesz = strlen(name) + 1;
1335         note.n_descsz = descsz;
1336         note.n_type = type;
1337         if (dst != NULL)
1338                 bcopy(&note, (char *)dst + *off, sizeof note);
1339         *off += sizeof note;
1340         if (dst != NULL)
1341                 bcopy(name, (char *)dst + *off, note.n_namesz);
1342         *off += roundup2(note.n_namesz, sizeof(Elf_Size));
1343         if (dst != NULL)
1344                 bcopy(desc, (char *)dst + *off, note.n_descsz);
1345         *off += roundup2(note.n_descsz, sizeof(Elf_Size));
1346 }
1347
1348 /*
1349  * Tell kern_execve.c about it, with a little help from the linker.
1350  */
1351 static struct execsw __elfN(execsw) = {
1352         __CONCAT(exec_, __elfN(imgact)),
1353         __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
1354 };
1355 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));