 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */
/*
 * Mapped file (mmap) interface to VM
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <security/mac/mac_framework.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <sys/pmckern.h>
#ifndef _SYS_SYSPROTO_H_
static int vm_mmap_vnode(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct vnode *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_cdev(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct cdev *, vm_ooffset_t *, vm_object_t *);
static int vm_mmap_shm(struct thread *, vm_size_t, vm_prot_t, vm_prot_t *,
    int *, struct shmfd *, vm_ooffset_t, vm_object_t *);
	struct sbrk_args *uap;
	/* Not yet implemented */
#ifndef _SYS_SYSPROTO_H_
	struct sstk_args *uap;
	/* Not yet implemented */
#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
ogetpagesize(td, uap)
	struct getpagesize_args *uap;
	td->td_retval[0] = PAGE_SIZE;
#endif /* COMPAT_43 */
/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 */
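/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): how the alignment rules above play out for a caller, assuming
 * a 4 KB page size and an open descriptor fd.  Offset 0x1234 is not page
 * aligned, so the kernel maps from trunc_page of the chosen address and
 * returns a pointer advanced by the page offset 0x234:
 *
 *	char *p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, 0x1234);
 *	// p[0] is byte 0x1234 of the file; p - 0x234 is page aligned.
 *
 * With MAP_FIXED, addr and offset must instead agree modulo PAGE_SIZE.
 */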
#ifndef _SYS_SYSPROTO_H_
	struct mmap_args *uap;
	struct pmckern_map_in pkm;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	objtype_t handle_type;
	struct vmspace *vms = td->td_proc->p_vmspace;

	addr = (vm_offset_t) uap->addr;
	prot = uap->prot & VM_PROT_ALL;
	/*
	 * Enforce the constraints.
	 * Mapping of length 0 is only allowed for old binaries.
	 * Anonymous mapping shall specify -1 as file descriptor and
	 * zero position for new code.  Be nice to ancient a.out
	 * binaries and correct pos for anonymous mapping, since old
	 * ld.so sometimes issues anonymous map requests with non-zero
	 * pos.
	 */
	if (!SV_CURPROC_FLAG(SV_AOUT)) {
		if ((uap->len == 0 && curproc->p_osrel >= P_OSREL_MAP_ANON) ||
		    ((flags & MAP_ANON) != 0 && (uap->fd != -1 || pos != 0)))
	if ((flags & MAP_ANON) != 0)
	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
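	/*
	 * Illustrative sketch (editorial addition): under the constraints
	 * above, a modern anonymous mapping must pass fd == -1 and pos == 0:
	 *
	 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	 *	    MAP_ANON | MAP_PRIVATE, -1, 0);
	 *
	 * and MAP_STACK additionally requires both PROT_READ and PROT_WRITE.
	 */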
	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
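	/*
	 * Worked example (editorial addition), assuming 4 KB pages: for
	 * pos = 0x1234 and len = 100, pageoff = 0x234 and size becomes
	 * round_page(0x234 + 100) = round_page(0x298) = 0x1000 -- one full
	 * page covering both the requested bytes and the leading page
	 * offset.
	 */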
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		if (addr & PAGE_MASK)

		/* Address range must be all in user VM space. */
		if (addr < vm_map_min(&vms->vm_map) ||
		    addr + size > vm_map_max(&vms->vm_map))
		if (addr + size < addr)
	} else {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		PROC_LOCK(td->td_proc);
		if (addr == 0 ||
		    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td->td_proc, RLIMIT_DATA))))
			addr = round_page((vm_offset_t)vms->vm_daddr +
			    lim_max(td->td_proc, RLIMIT_DATA));
		PROC_UNLOCK(td->td_proc);
	}
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle_type = OBJT_DEFAULT;
		maxprot = VM_PROT_ALL;
		/*
		 * Mapping file, get fp for validation and
		 * don't let the descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
		if (fp->f_type == DTYPE_SHM) {
			handle_type = OBJT_SWAP;
			maxprot = VM_PROT_NONE;

			/* FREAD should always be set. */
			if (fp->f_flag & FREAD)
				maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
			if (fp->f_flag & FWRITE)
				maxprot |= VM_PROT_WRITE;
		if (fp->f_type != DTYPE_VNODE) {
#if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
    defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
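		/*
		 * Illustrative sketch (editorial addition, not part of the
		 * original source): the FPOSIXSHM path above is what a
		 * shm_open(3) consumer ends up on:
		 *
		 *	fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
		 *	ftruncate(fd, len);
		 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 *	    MAP_SHARED, fd, 0);
		 *
		 * "/myshm" and len are placeholders; for such descriptors
		 * the kernel can skip on-disk synchronization entirely, as
		 * the comment above explains.
		 */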
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination?  What if
		 * proc does a setuid?
		 */
		if (vp->v_mount != NULL && vp->v_mount->mnt_flag & MNT_NOEXEC)
			maxprot = VM_PROT_NONE;
		else
			maxprot = VM_PROT_EXECUTE;
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0) {
			if ((fp->f_flag & FWRITE) != 0) {
				maxprot |= VM_PROT_WRITE;
			} else if ((prot & PROT_WRITE) != 0) {
		} else if (vp->v_type != VCHR || (fp->f_flag & FWRITE) != 0) {
			maxprot |= VM_PROT_WRITE;
		handle_type = OBJT_VNODE;
	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle_type, handle, pos);

	/* inform hwpmc(4) if an executable is being mapped */
	if (error == 0 && handle_type == OBJT_VNODE &&
	    (prot & PROT_EXEC)) {
		pkm.pm_file = handle;
		pkm.pm_address = (uintptr_t) addr;
		PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
	td->td_retval[0] = (register_t) (addr + pageoff);
freebsd6_mmap(struct thread *td, struct freebsd6_mmap_args *uap)
	struct mmap_args oargs;

	oargs.addr = uap->addr;
	oargs.len = uap->len;
	oargs.prot = uap->prot;
	oargs.flags = uap->flags;
	oargs.fd = uap->fd;
	oargs.pos = uap->pos;
	return (mmap(td, &oargs));
#ifndef _SYS_SYSPROTO_H_
	struct ommap_args *uap;
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		PROT_EXEC | PROT_WRITE,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};

#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100
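	/*
	 * Illustrative note (editorial addition): the old 4.3BSD prot word
	 * uses bit 0 for execute, bit 1 for write, and bit 2 for read, so
	 * e.g. cvtbsdprot[5] (binary 101, read + execute) yields
	 * PROT_EXEC | PROT_READ, matching the table entries above.
	 */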
	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
#endif /* COMPAT_43 */
#ifndef _SYS_SYSPROTO_H_
	struct msync_args *uap;
	vm_size_t size, pageoff;

	addr = (vm_offset_t) uap->addr;
	pageoff = (addr & PAGE_MASK);
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_INVALID_ARGUMENT:
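/*
 * Illustrative sketch (editorial addition): a typical userland caller of
 * the path above, flushing a shared file mapping synchronously; p and
 * len are placeholders:
 *
 *	if (msync(p, len, MS_SYNC) == -1)
 *		warn("msync");
 *
 * MS_ASYNC and MS_INVALIDATE are mutually exclusive, as checked above.
 */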
#ifndef _SYS_SYSPROTO_H_
	struct munmap_args *uap;
	struct pmckern_map_out pkm;
	vm_map_entry_t entry;
	vm_size_t size, pageoff;

	addr = (vm_offset_t) uap->addr;
	pageoff = (addr & PAGE_MASK);
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
	/*
	 * Check for illegal addresses.  Watch out for address wrap...
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if (addr < vm_map_min(map) || addr + size > vm_map_max(map))
	/*
	 * Inform hwpmc if the address range being unmapped contains
	 * an executable region.
	 */
	pkm.pm_address = (uintptr_t) NULL;
	if (vm_map_lookup_entry(map, addr, &entry)) {
		for (;
		    entry != &map->header && entry->start < addr + size;
		    entry = entry->next) {
			if (vm_map_check_protection(map, entry->start,
			    entry->end, VM_PROT_EXECUTE) == TRUE) {
				pkm.pm_address = (uintptr_t) addr;
				pkm.pm_size = (size_t) size;
	vm_map_delete(map, addr, addr + size);

	/* downgrade the lock to prevent a LOR with the pmc-sx lock */
	vm_map_lock_downgrade(map);
	if (pkm.pm_address != (uintptr_t) NULL)
		PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
	vm_map_unlock_read(map);
	/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	struct mprotect_args *uap;
	vm_size_t size, pageoff;

	addr = (vm_offset_t) uap->addr;
	prot = uap->prot & VM_PROT_ALL;
	pageoff = (addr & PAGE_MASK);
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_PROTECTION_FAILURE:
	case KERN_RESOURCE_SHORTAGE:
#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	struct minherit_args *uap;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	inherit = uap->inherit;
	pageoff = (addr & PAGE_MASK);
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_PROTECTION_FAILURE:
#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	struct madvise_args *uap;
	vm_offset_t start, end;

	/*
	 * Check for our special case, advising the swap pager we are
	 * "immortal."
	 */
	if (uap->behav == MADV_PROTECT) {
		error = priv_check(td, PRIV_VM_MADV_PROTECT);
			p->p_flag |= P_PROTECTED;
	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	map = &td->td_proc->p_vmspace->vm_map;
	if ((vm_offset_t)uap->addr < vm_map_min(map) ||
	    (vm_offset_t)uap->addr + uap->len > vm_map_max(map))
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)

	/*
	 * Since this routine is only advisory, we default to conservative
	 * action.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(map, start, end, uap->behav))
#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	struct mincore_args *uap;
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	unsigned int timestamp;
	/*
	 * Make sure that the addresses presented are valid for user
	 * addressing.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	map = &td->td_proc->p_vmspace->vm_map;
	if (end > vm_map_max(map) || end < addr)

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry)) {
		vm_map_unlock_read(map);
	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * check for contiguity
		 */
		if (current->end < end &&
		    (entry->next == &map->header ||
		    current->next->start > current->end)) {
			vm_map_unlock_read(map);
		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);

			/*
			 * calculate the page index into the object
			 */
			offset = current->offset + (addr - current->start);
			pindex = OFF_TO_IDX(offset);
			VM_OBJECT_LOCK(current->object.vm_object);
			m = vm_page_lookup(current->object.vm_object,
			    pindex);
			/*
			 * if the page is resident, then gather information about
			 * it.
			 */
			if (m != NULL && m->valid != 0) {
				mincoreinfo = MINCORE_INCORE;
				vm_page_lock_queues();
					mincoreinfo |= MINCORE_MODIFIED_OTHER;
				if ((m->flags & PG_REFERENCED) ||
				    pmap_ts_referenced(m)) {
					vm_page_flag_set(m, PG_REFERENCED);
					mincoreinfo |= MINCORE_REFERENCED_OTHER;
				vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(current->object.vm_object);
			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
			lastvecindex = vecindex;
	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
	vm_map_unlock_read(map);
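/*
 * Illustrative sketch (editorial addition): interpreting the per-page
 * byte vector the loop above fills in; p, len, and vec are placeholders,
 * with vec sized at one byte per page of the range:
 *
 *	if (mincore(p, len, vec) == 0 && (vec[0] & MINCORE_INCORE))
 *		printf("first page is resident\n");
 *
 * Pages skipped between map entries come back as zero bytes, which is
 * what the lastvecindex bookkeeping above guarantees.
 */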
#ifndef _SYS_SYSPROTO_H_
	struct mlock_args *uap;
	vm_offset_t addr, end, last, start;
	vm_size_t npages, size;

	error = priv_check(td, PRIV_VM_MLOCK);
	addr = (vm_offset_t)uap->addr;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
	    lim_cur(proc, RLIMIT_MEMLOCK)) {
	if (npages + cnt.v_wire_count > vm_page_max_wired)
	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	struct mlockall_args *uap;

	map = &td->td_proc->p_vmspace->vm_map;

	if ((uap->how == 0) || ((uap->how & ~(MCL_CURRENT|MCL_FUTURE)) != 0))

	/*
	 * If wiring all pages in the process would cause it to exceed
	 * a hard resource limit, return ENOMEM.
	 */
	PROC_LOCK(td->td_proc);
	if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map))) >
	    lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(td->td_proc);
	PROC_UNLOCK(td->td_proc);

	error = priv_check(td, PRIV_VM_MLOCK);
	if (uap->how & MCL_FUTURE) {
		vm_map_modflags(map, MAP_WIREFUTURE, 0);

	if (uap->how & MCL_CURRENT) {
		/*
		 * P1003.1-2001 mandates that all currently mapped pages
		 * will be memory resident and locked (wired) upon return
		 * from mlockall().  vm_map_wire() will wire pages, by
		 * calling vm_fault_wire() for each page in the region.
		 */
		error = vm_map_wire(map, vm_map_min(map), vm_map_max(map),
		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
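/*
 * Illustrative sketch (editorial addition): a process that wants both
 * current and future mappings wired, e.g. to avoid page faults in a
 * latency-sensitive loop:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
 *		err(1, "mlockall");
 *
 * MCL_FUTURE is implemented by the MAP_WIREFUTURE map flag set above,
 * which vm_mmap() honors when later mappings are created.
 */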
#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	struct munlockall_args *uap;

	map = &td->td_proc->p_vmspace->vm_map;
	error = priv_check(td, PRIV_VM_MUNLOCK);

	/* Clear the MAP_WIREFUTURE flag from this vm_map. */
	vm_map_modflags(map, 0, MAP_WIREFUTURE);

	/* Forcibly unwire all pages. */
	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	struct munlock_args *uap;
	vm_offset_t addr, end, last, start;

	error = priv_check(td, PRIV_VM_MUNLOCK);
	addr = (vm_offset_t)uap->addr;
	start = trunc_page(addr);
	end = round_page(last);
	if (last < addr || end < addr)
	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
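/*
 * Illustrative sketch (editorial addition): wiring and later unwiring a
 * buffer from userland; buf and buflen are placeholders.  Both calls
 * round the range to page boundaries exactly as the trunc_page()/
 * round_page() arithmetic above does:
 *
 *	if (mlock(buf, buflen) == -1)
 *		err(1, "mlock");
 *	if (munlock(buf, buflen) == -1)
 *		err(1, "munlock");
 */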
/*
 * Helper function for vm_mmap.  Performs sanity checks specific to mmap
 * operations on vnodes.
 */
vm_mmap_vnode(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct vnode *vp, vm_ooffset_t *foffp, vm_object_t *objp)
	cred = td->td_ucred;
	vfslocked = VFS_LOCK_GIANT(mp);
	if ((error = vget(vp, LK_SHARED, td)) != 0) {
		VFS_UNLOCK_GIANT(vfslocked);
	if (vp->v_type == VREG) {
		/*
		 * Get the proper underlying object
		 */
		if (obj->handle != vp) {
			vp = (struct vnode *)obj->handle;
			vget(vp, LK_SHARED, td);
	} else if (vp->v_type == VCHR) {
		error = vm_mmap_cdev(td, objsize, prot, maxprotp, flagsp,
		    vp->v_rdev, foffp, objp);
	if ((error = VOP_GETATTR(vp, &va, cred)))
	error = mac_vnode_check_mmap(cred, vp, prot, flags);
	if ((flags & MAP_SHARED) != 0) {
		if ((va.va_flags & (SF_SNAPSHOT|IMMUTABLE|APPEND)) != 0) {
			if (prot & PROT_WRITE) {
			*maxprotp &= ~VM_PROT_WRITE;
	/*
	 * If it is a regular file without any references,
	 * we do not need to sync it.
	 * Adjust object size to be the size of the actual file.
	 */
	objsize = round_page(va.va_size);
	if (va.va_nlink == 0)
		flags |= MAP_NOSYNC;
	obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff,
	    td->td_ucred);
	vfs_mark_atime(vp, cred);
	VFS_UNLOCK_GIANT(vfslocked);
/*
 * Helper function for vm_mmap.  Performs sanity checks specific to mmap
 * operations on cdevs.
 */
vm_mmap_cdev(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct cdev *cdev, vm_ooffset_t *foff, vm_object_t *objp)
	int error, flags, ref;

	dsw = dev_refthread(cdev, &ref);
	if (dsw->d_flags & D_MMAP_ANON) {
		dev_relthread(cdev, ref);
		*maxprotp = VM_PROT_ALL;
		*flagsp |= MAP_ANON;
	/*
	 * cdevs do not provide private mappings of any kind.
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & PROT_WRITE) != 0) {
		dev_relthread(cdev, ref);
	if (flags & (MAP_PRIVATE|MAP_COPY)) {
		dev_relthread(cdev, ref);
	/*
	 * Force device mappings to be shared.
	 */
	flags |= MAP_SHARED;
	error = mac_cdev_check_mmap(td->td_ucred, cdev, prot);
		dev_relthread(cdev, ref);
	/*
	 * First, try d_mmap_single().  If that is not implemented
	 * (returns ENODEV), fall back to using the device pager.
	 * Note that d_mmap_single() must return a reference to the
	 * object (it needs to bump the reference count of the object
	 * it returns somehow).
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 */
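	/*
	 * Illustrative sketch (editorial addition): the shape of a driver's
	 * d_mmap_single callback as consumed below.  The body is
	 * hypothetical; only the hook itself comes from the source.
	 *
	 *	static int
	 *	mydev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
	 *	    vm_size_t size, struct vm_object **object, int nprot)
	 *	{
	 *		...return a referenced VM object via *object, or
	 *		ENODEV to fall back to the device pager...
	 *	}
	 */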
	error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot);
	dev_relthread(cdev, ref);
	if (error != ENODEV)
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
/*
 * Helper function for vm_mmap.  Performs sanity checks specific to mmap
 * operations on shm file descriptors.
 */
vm_mmap_shm(struct thread *td, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp,
    struct shmfd *shmfd, vm_ooffset_t foff, vm_object_t *objp)
	if ((*flagsp & MAP_SHARED) != 0 &&
	    (*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & PROT_WRITE) != 0)

	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, *flagsp);
	error = shm_mmap(shmfd, objsize, foff, objp);
/*
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
    vm_prot_t maxprot, int flags,
    objtype_t handle_type, void *handle,
	vm_object_t object = NULL;
	int rv = KERN_SUCCESS;
	struct thread *td = curthread;
	size = round_page(size);

	PROC_LOCK(td->td_proc);
	if (td->td_proc->p_vmspace->vm_map.size + size >
	    lim_cur(td->td_proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(td->td_proc);
	PROC_UNLOCK(td->td_proc);
	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)

	if ((flags & MAP_FIXED) == 0) {
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
	/*
	 * Lookup/allocate object.
	 */
	switch (handle_type) {
	case OBJT_DEVICE:
		error = vm_mmap_cdev(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object);
		break;
	case OBJT_VNODE:
		error = vm_mmap_vnode(td, size, prot, &maxprot, &flags,
		    handle, &foff, &object);
		break;
	case OBJT_SWAP:
		error = vm_mmap_shm(td, size, prot, &maxprot, &flags,
		    handle, foff, &object);
		break;
	case OBJT_DEFAULT:
		if (handle == NULL) {

	if (flags & MAP_ANON) {
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
	} else if (flags & MAP_PREFAULT_READ)
		docow = MAP_PREFAULT;
	else
		docow = MAP_PREFAULT_PARTIAL;
	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;
	if (flags & MAP_STACK)
		rv = vm_map_stack(map, *addr, size, prot, maxprot,
		    docow | MAP_STACK_GROWS_DOWN);
	else if (fitit)
		rv = vm_map_find(map, object, foff, addr, size,
		    object != NULL && object->type == OBJT_DEVICE ?
		    VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, prot, maxprot, docow);
	else
		rv = vm_map_fixed(map, object, foff, *addr, size,
		    prot, maxprot, docow);
	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}
	/*
	 * If the process has requested that all future mappings
	 * be wired, then heed this.
	 */
	if ((rv == KERN_SUCCESS) && (map->flags & MAP_WIREFUTURE))
		vm_map_wire(map, *addr, *addr + size,
		    VM_MAP_WIRE_USER | ((flags & MAP_STACK) ?
		    VM_MAP_WIRE_HOLESOK : VM_MAP_WIRE_NOHOLES));

	return (vm_mmap_to_errno(rv));
int
vm_mmap_to_errno(int rv)
{

	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_PROTECTION_FAILURE: