/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
 *
 *	@(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 */
/*
 * Mapped file (mmap) interface to VM
 */
#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#ifndef _SYS_SYSPROTO_H_
struct sbrk_args {
	int incr;
};
#endif
static int max_proc_mmap;
SYSCTL_INT(_vm, OID_AUTO, max_proc_mmap, CTLFLAG_RW, &max_proc_mmap, 0, "");
/*
 * Set the maximum number of vm_map_entry structures per process.  Roughly
 * speaking vm_map_entry structures are tiny, so allowing them to eat 1/100
 * of our KVM malloc space still results in generous limits.  We want a
 * default that is good enough to prevent the kernel from running out of
 * resources if attacked from a compromised user account but generous
 * enough such that multi-threaded processes are not unduly inconvenienced.
 */
static void vmmapentry_rsrc_init(void *);
SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL)

static void
vmmapentry_rsrc_init(dummy)
	void *dummy;
{
	max_proc_mmap = vm_kmem_size / sizeof(struct vm_map_entry);
	max_proc_mmap /= 100;
}
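
/*
 * For a sense of scale (figures hypothetical): with vm_kmem_size at 200MB
 * and a vm_map_entry of roughly 100 bytes, the division above yields about
 * two million entries, so the per-process cap settles near 20,000.
 */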
/*
 * MPSAFE
 */
/* ARGSUSED */
int
sbrk(td, uap)
	struct thread *td;
	struct sbrk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}
#ifndef _SYS_SYSPROTO_H_
struct sstk_args {
	int incr;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
sstk(td, uap)
	struct thread *td;
	struct sstk_args *uap;
{
	/* Not yet implemented */
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return (EOPNOTSUPP);
}
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct getpagesize_args {
	int dummy;
};
#endif

/* ARGSUSED */
int
ogetpagesize(td, uap)
	struct thread *td;
	struct getpagesize_args *uap;
{
	/* MP SAFE */
	td->td_retval[0] = PAGE_SIZE;
	return (0);
}
#endif				/* COMPAT_43 || COMPAT_SUNOS */
/*
 * Memory Map (mmap) system call.  Note that the file offset
 * and address are allowed to be NOT page aligned, though if
 * the MAP_FIXED flag is set, both must have the same remainder
 * modulo the PAGE_SIZE (POSIX 1003.1b).  If the address is not
 * page-aligned, the actual mapping starts at trunc_page(addr)
 * and the return value is adjusted up by the page offset.
 *
 * Generally speaking, only character devices which are themselves
 * memory-based, such as a video framebuffer, can be mmap'd.  Otherwise
 * there would be no cache coherency between a descriptor and a VM mapping
 * both to the same character device.
 *
 * Block devices can be mmap'd no matter what they represent.  Cache coherency
 * is maintained as long as you do not write directly to the underlying
 * device.
 */
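
/*
 * Illustrative userland sketch of the alignment rule above (not compiled,
 * hence the #if 0; "datafile" and the sizes are assumptions).  Under the
 * semantics described above, an unaligned file offset is honored by mapping
 * from the containing page and returning a pointer that carries the in-page
 * remainder.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
map_unaligned(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("datafile", O_RDONLY);
	char *p;

	if (fd == -1)
		return (-1);
	/*
	 * Offset psz + 7 is not page aligned: the mapping itself begins
	 * at file offset psz, and p points 7 bytes past its start.
	 */
	p = mmap(NULL, 100, PROT_READ, MAP_PRIVATE, fd, (off_t)(psz + 7));
	if (p == MAP_FAILED) {
		close(fd);
		return (-1);
	}
	munmap(p - 7, (size_t)psz);	/* unmap via the page base */
	close(fd);
	return (0);
}
#endif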
#ifndef _SYS_SYSPROTO_H_
struct mmap_args {
	void *addr;
	size_t len;
	int prot;
	int flags;
	int fd;
	long pad;
	off_t pos;
};
#endif

/*
 * MPSAFE
 */
int
mmap(td, uap)
	struct thread *td;
	struct mmap_args *uap;
{
	struct file *fp = NULL;
	struct vnode *vp = NULL;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot, maxprot;
	void *handle;
	int flags, error;
	int disablexworkaround;
	off_t pos;
	struct vmspace *vms = td->td_proc->p_vmspace;
	vm_object_t obj;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
	flags = uap->flags;
	pos = uap->pos;

	/* make sure mapping fits into numeric range etc */
	if ((ssize_t) uap->len < 0 ||
	    ((flags & MAP_ANON) && uap->fd != -1))
		return (EINVAL);

	if (flags & MAP_STACK) {
		if ((uap->fd != -1) ||
		    ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE)))
			return (EINVAL);
		flags |= MAP_ANON;
		pos = 0;
	}

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t) round_page(size);	/* hi end */
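
	/*
	 * Worked example of the rounding, assuming PAGE_SIZE == 4096
	 * (figures illustrative): pos = 0x12345 gives pageoff = 0x345 and
	 * a page-aligned pos of 0x12000; a requested len of 100 then grows
	 * to size = round_page(100 + 0x345) = 0x1000, one full page that
	 * covers every requested byte.
	 */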
	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
			return (EINVAL);
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
		if (addr + size < addr)
			return (EINVAL);
	}
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr == 0 ||
	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz)))
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);

	mtx_lock(&Giant);	/* syscall marked mp-safe but isn't */
	if (flags & MAP_ANON) {
		/*
		 * Mapping blank space is trivial.
		 */
		handle = NULL;
		maxprot = VM_PROT_ALL;
		pos = 0;
	} else {
		/*
		 * Mapping file, get fp for validation.  Obtain vnode and
		 * make sure it is of appropriate type.  Don't let the
		 * descriptor disappear on us if we block.
		 */
		if ((error = fget(td, uap->fd, &fp)) != 0)
			goto done;
		if (fp->f_type != DTYPE_VNODE) {
			error = EINVAL;
			goto done;
		}

		/*
		 * POSIX shared-memory objects are defined to have
		 * kernel persistence, and are not defined to support
		 * read(2)/write(2) -- or even open(2).  Thus, we can
		 * use MAP_ASYNC to trade on-disk coherence for speed.
		 * The shm_open(3) library routine turns on the FPOSIXSHM
		 * flag to request this behavior.
		 */
		if (fp->f_flag & FPOSIXSHM)
			flags |= MAP_NOSYNC;
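		/*
		 * (Illustrative: a userland shm_open("/myshm", O_RDWR |
		 * O_CREAT, 0600) marks its descriptor FPOSIXSHM, so a later
		 * mmap(..., MAP_SHARED, shmfd, 0) lands here and picks up
		 * MAP_NOSYNC automatically; the name is an assumption.)
		 */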
		vp = (struct vnode *) fp->f_data;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error) {
			/* vget failed, so there is nothing to vput below. */
			vp = NULL;
			goto done;
		}
		if (vp->v_type != VREG && vp->v_type != VCHR) {
			error = EINVAL;
			goto done;
		}
		if (vp->v_type == VREG) {
			/*
			 * Get the proper underlying object
			 */
			if (VOP_GETVOBJECT(vp, &obj) != 0) {
				error = EINVAL;
				goto done;
			}
			if (obj->handle != vp) {
				vput(vp);
				vp = (struct vnode *)obj->handle;
				vget(vp, LK_EXCLUSIVE, td);
			}
		}
		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if ((vp->v_type == VCHR) &&
		    (vp->v_rdev->si_devsw->d_flags & D_MMAP_ANON)) {
			handle = NULL;
			maxprot = VM_PROT_ALL;
			flags |= MAP_ANON;
			pos = 0;
		} else {
			/*
			 * cdevs do not provide private mappings of any kind.
			 */
			/*
			 * However, for the XIG X server to continue to work,
			 * we should allow the superuser to do it anyway.
			 * We only allow it at securelevel < 1.
			 * (Because the XIG X server writes directly to video
			 * memory via /dev/mem, it should never work at any
			 * other securelevel.)
			 * XXX this will have to go
			 */
			if (securelevel_ge(td->td_ucred, 1))
				disablexworkaround = 1;
			else
				disablexworkaround = suser(td);
			if (vp->v_type == VCHR && disablexworkaround &&
			    (flags & (MAP_PRIVATE|MAP_COPY))) {
				error = EINVAL;
				goto done;
			}
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination?  What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_flag & FREAD) {
				maxprot |= VM_PROT_READ;
			} else if (prot & PROT_READ) {
				error = EACCES;
				goto done;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out.  Check for superuser, only if
			 * we're at securelevel < 1, to allow the XIG X server
			 * to continue to work.
			 */
			if ((flags & MAP_SHARED) != 0 ||
			    (vp->v_type == VCHR && disablexworkaround)) {
				if ((fp->f_flag & FWRITE) != 0) {
					struct vattr va;

					if ((error = VOP_GETATTR(vp, &va,
					    td->td_ucred, td))) {
						goto done;
					}
					if ((va.va_flags &
					    (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0) {
						maxprot |= VM_PROT_WRITE;
					} else if (prot & PROT_WRITE) {
						error = EPERM;
						goto done;
					}
				} else if ((prot & PROT_WRITE) != 0) {
					error = EACCES;
					goto done;
				}
			} else {
				maxprot |= VM_PROT_WRITE;
			}
			handle = (void *)vp;
		}
	}
	/*
	 * Do not allow more than a certain number of vm_map_entry structures
	 * per process.  Scale with the number of rforks sharing the map
	 * to make the limit reasonable for threads.
	 */
	if ((max_proc_mmap != 0) &&
	    (vms->vm_map.nentries >= max_proc_mmap * vms->vm_refcnt)) {
		error = ENOMEM;
		goto done;
	}

	error = vm_mmap(&vms->vm_map, &addr, size, prot, maxprot,
	    flags, handle, pos);
	if (error == 0)
		td->td_retval[0] = (register_t) (addr + pageoff);
done:
	if (vp != NULL)
		vput(vp);
	mtx_unlock(&Giant);
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}
#ifdef COMPAT_43
#ifndef _SYS_SYSPROTO_H_
struct ommap_args {
	caddr_t addr;
	int len;
	int prot;
	int flags;
	int fd;
	long pos;
};
#endif
int
ommap(td, uap)
	struct thread *td;
	struct ommap_args *uap;
{
	struct mmap_args nargs;
	static const char cvtbsdprot[8] = {
		0,
		PROT_EXEC,
		PROT_WRITE,
		PROT_EXEC | PROT_WRITE,
		PROT_READ,
		PROT_EXEC | PROT_READ,
		PROT_WRITE | PROT_READ,
		PROT_EXEC | PROT_WRITE | PROT_READ,
	};
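
	/*
	 * (Reading the table: in the old 4.3BSD encoding the low three prot
	 * bits are exec, write, and read, so an old prot of 0x5 -- read plus
	 * exec -- indexes cvtbsdprot[5] and converts to
	 * PROT_EXEC | PROT_READ.)
	 */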
#define	OMAP_ANON	0x0002
#define	OMAP_COPY	0x0020
#define	OMAP_SHARED	0x0010
#define	OMAP_FIXED	0x0100

	nargs.addr = uap->addr;
	nargs.len = uap->len;
	nargs.prot = cvtbsdprot[uap->prot & 0x7];
	nargs.flags = 0;
	if (uap->flags & OMAP_ANON)
		nargs.flags |= MAP_ANON;
	if (uap->flags & OMAP_COPY)
		nargs.flags |= MAP_COPY;
	if (uap->flags & OMAP_SHARED)
		nargs.flags |= MAP_SHARED;
	else
		nargs.flags |= MAP_PRIVATE;
	if (uap->flags & OMAP_FIXED)
		nargs.flags |= MAP_FIXED;
	nargs.fd = uap->fd;
	nargs.pos = uap->pos;
	return (mmap(td, &nargs));
}
#endif				/* COMPAT_43 */
#ifndef _SYS_SYSPROTO_H_
struct msync_args {
	void *addr;
	int len;
	int flags;
};
#endif
/*
 * MPSAFE
 */
int
msync(td, uap)
	struct thread *td;
	struct msync_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int flags;
	vm_map_t map;
	int rv;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	flags = uap->flags;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
		return (EINVAL);

	mtx_lock(&Giant);

	map = &td->td_proc->p_vmspace->vm_map;

	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE) {
			mtx_unlock(&Giant);
			return (EINVAL);
		}
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

	mtx_unlock(&Giant);

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:
		return (EINVAL);
	}
}
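
/*
 * A minimal userland sketch of the len == 0 behavior described above
 * (illustrative, not compiled; assumes "path" names a file at least one
 * page long): a zero length flushes the whole map entry containing the
 * address.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
sync_whole_mapping(const char *path)
{
	int fd = open(path, O_RDWR);
	char *p;

	if (fd == -1)
		return (-1);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return (-1);
	}
	p[0] = 'x';			/* dirty the first page */
	msync(p, 0, MS_SYNC);		/* flush the entire containing entry */
	munmap(p, 4096);
	close(fd);
	return (0);
}
#endif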
#ifndef _SYS_SYSPROTO_H_
struct munmap_args {
	void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munmap(td, uap)
	struct thread *td;
	struct munmap_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	if (size == 0)
		return (0);

	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
	mtx_lock(&Giant);
	map = &td->td_proc->p_vmspace->vm_map;
	/*
	 * Make sure entire range is allocated.
	 */
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE)) {
		mtx_unlock(&Giant);
		return (EINVAL);
	}
	/* returns nothing but KERN_SUCCESS anyway */
	(void) vm_map_remove(map, addr, addr + size);
	mtx_unlock(&Giant);
	return (0);
}
void
munmapfd(td, fd)
	struct thread *td;
	int fd;
{
	/*
	 * XXX should unmap any regions mapped to this file
	 */
	FILEDESC_LOCK(td->td_proc->p_fd);
	td->td_proc->p_fd->fd_ofileflags[fd] &= ~UF_MAPPED;
	FILEDESC_UNLOCK(td->td_proc->p_fd);
}
#ifndef _SYS_SYSPROTO_H_
struct mprotect_args {
	const void *addr;
	size_t len;
	int prot;
};
#endif
/*
 * MPSAFE
 */
int
mprotect(td, uap)
	struct thread *td;
	struct mprotect_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_prot_t prot;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;
	prot = uap->prot & VM_PROT_ALL;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, prot, FALSE)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
#ifndef _SYS_SYSPROTO_H_
struct minherit_args {
	void *addr;
	size_t len;
	int inherit;
};
#endif
/*
 * MPSAFE
 */
int
minherit(td, uap)
	struct thread *td;
	struct minherit_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_inherit_t inherit;

	addr = (vm_offset_t)uap->addr;
	size = uap->len;
	inherit = uap->inherit;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

	switch (vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, inherit)) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);
}
#ifndef _SYS_SYSPROTO_H_
struct madvise_args {
	void *addr;
	size_t len;
	int behav;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
madvise(td, uap)
	struct thread *td;
	struct madvise_args *uap;
{
	vm_offset_t start, end;

	/*
	 * Check for illegal behavior
	 */
	if (uap->behav < 0 || uap->behav > MADV_CORE)
		return (EINVAL);
	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (VM_MAXUSER_ADDRESS > 0 &&
	    ((vm_offset_t) uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && (vm_offset_t) uap->addr < VM_MIN_ADDRESS)
		return (EINVAL);
	if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr)
		return (EINVAL);

	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

	if (vm_map_madvise(&td->td_proc->p_vmspace->vm_map, start, end,
	    uap->behav))
		return (EINVAL);
	return (0);
}
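
/*
 * Hedged userland sketch of the advisory call above (illustrative, not
 * compiled; MADV_SEQUENTIAL is one behavior inside the accepted range):
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

void
advise_sequential(void *base, size_t len)
{
	/*
	 * Boundaries need not be exact: start is truncated and end is
	 * rounded up to page boundaries before vm_map_madvise runs.
	 */
	(void)madvise(base, len, MADV_SEQUENTIAL);
}
#endif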
#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;
};
#endif

/*
 * MPSAFE
 */
/* ARGSUSED */
int
mincore(td, uap)
	struct thread *td;
	struct mincore_args *uap;
{
	vm_offset_t addr, first_addr;
	vm_offset_t end, cend;
	pmap_t pmap;
	vm_map_t map;
	char *vec;
	int error = 0;
	int vecindex, lastvecindex;
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int mincoreinfo;
	unsigned int timestamp;

	/*
	 * Make sure that the addresses presented are valid for user
	 * VM.
	 */
	first_addr = addr = trunc_page((vm_offset_t) uap->addr);
	end = addr + (vm_size_t)round_page(uap->len);
	if (VM_MAXUSER_ADDRESS > 0 && end > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (end < addr)
		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	mtx_lock(&Giant);
	map = &td->td_proc->p_vmspace->vm_map;
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))
		entry = entry->next;

	/*
	 * Do this on a map entry basis so that if the pages are not
	 * in the current process's address space, we can easily look
	 * up the pages elsewhere.
	 */
	lastvecindex = -1;
	for (current = entry;
	    (current != &map->header) && (current->start < end);
	    current = current->next) {

		/*
		 * ignore submaps (for now) or null objects
		 */
		if ((current->eflags & MAP_ENTRY_IS_SUB_MAP) ||
		    current->object.vm_object == NULL)
			continue;

		/*
		 * limit this scan to the current map entry and the
		 * limits for the mincore call
		 */
		if (addr < current->start)
			addr = current->start;
		cend = current->end;
		if (cend > end)
			cend = end;

		/*
		 * scan this entry one page at a time
		 */
		while (addr < cend) {
			/*
			 * Check pmap first, it is likely faster, also
			 * it can provide info as to whether we are the
			 * one referencing or modifying the page.
			 */
			mincoreinfo = pmap_mincore(pmap, addr);
			if (!mincoreinfo) {
				vm_pindex_t pindex;
				vm_ooffset_t offset;
				vm_page_t m;

				/*
				 * calculate the page index into the object
				 */
				offset = current->offset + (addr - current->start);
				pindex = OFF_TO_IDX(offset);
				m = vm_page_lookup(current->object.vm_object,
					pindex);
				/*
				 * if the page is resident, then gather
				 * information about it.
				 */
				if (m) {
					mincoreinfo = MINCORE_INCORE;
					if (m->dirty ||
						pmap_is_modified(m))
						mincoreinfo |= MINCORE_MODIFIED_OTHER;
					if ((m->flags & PG_REFERENCED) ||
						pmap_ts_referenced(m)) {
						vm_page_flag_set(m, PG_REFERENCED);
						mincoreinfo |= MINCORE_REFERENCED_OTHER;
					}
				}
			}

			/*
			 * subyte may page fault.  In case it needs to modify
			 * the map, we release the lock.
			 */
			vm_map_unlock_read(map);

			/*
			 * calculate index into user supplied byte vector
			 */
			vecindex = OFF_TO_IDX(addr - first_addr);

			/*
			 * If we have skipped map entries, we need to make sure that
			 * the byte vector is zeroed for those skipped entries.
			 */
			while ((lastvecindex + 1) < vecindex) {
				error = subyte(vec + lastvecindex, 0);
				if (error) {
					error = EFAULT;
					goto done2;
				}
				++lastvecindex;
			}

			/*
			 * Pass the page information to the user
			 */
			error = subyte(vec + vecindex, mincoreinfo);
			if (error) {
				error = EFAULT;
				goto done2;
			}

			/*
			 * If the map has changed, due to the subyte, the previous
			 * output may be invalid.
			 */
			vm_map_lock_read(map);
			if (timestamp != map->timestamp)
				goto RestartScan;

			lastvecindex = vecindex;
			addr += PAGE_SIZE;
		}
	}

	/*
	 * subyte may page fault.  In case it needs to modify
	 * the map, we release the lock.
	 */
	vm_map_unlock_read(map);

	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
			error = EFAULT;
			goto done2;
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
done2:
	mtx_unlock(&Giant);
	return (error);
}
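
/*
 * Hedged userland sketch of the interface implemented above (illustrative,
 * not compiled): one status byte comes back per page of the queried range.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
report_residency(void *base, size_t len)
{
	long psz = sysconf(_SC_PAGESIZE);
	size_t i, npages = (len + psz - 1) / psz;
	char *vec = malloc(npages);

	if (vec == NULL)
		return (-1);
	if (mincore(base, len, vec) == -1) {
		free(vec);
		return (-1);
	}
	for (i = 0; i < npages; i++)
		if (vec[i] & MINCORE_INCORE)
			printf("page %lu resident\n", (unsigned long)i);
	free(vec);
	return (0);
}
#endif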
#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
mlock(td, uap)
	struct thread *td;
	struct mlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);

#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))) >
	    td->td_proc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_wire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
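
/*
 * Hedged userland sketch (illustrative, not compiled): wiring a buffer,
 * subject to the vm_page_max_wired and RLIMIT_MEMLOCK checks above, then
 * unwiring it.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>

int
pin_buffer(void *buf, size_t len)
{
	if (mlock(buf, len) == -1)
		return (-1);	/* EAGAIN or ENOMEM per the checks above */
	/* ... use buf while it is guaranteed resident ... */
	return (munlock(buf, len));
}
#endif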
#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

/*
 * MPSAFE
 */
int
mlockall(td, uap)
	struct thread *td;
	struct mlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct munlockall_args {
	register_t dummy;
};
#endif

/*
 * MPSAFE
 */
int
munlockall(td, uap)
	struct thread *td;
	struct munlockall_args *uap;
{
	/* mtx_lock(&Giant); */
	/* mtx_unlock(&Giant); */
	return 0;
}
#ifndef _SYS_SYSPROTO_H_
struct munlock_args {
	const void *addr;
	size_t len;
};
#endif
/*
 * MPSAFE
 */
int
munlock(td, uap)
	struct thread *td;
	struct munlock_args *uap;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(td);
	if (error)
		return (error);
#endif

	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
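
/*
 * A hedged sketch of a kernel-internal call into vm_mmap() below
 * (illustrative, not compiled; the helper name and parameters are
 * assumptions): obtaining an anonymous, pageable region the way callers
 * such as SysV shared memory might.
 */
#if 0
static int
example_anon_region(vm_map_t map, vm_size_t size, vm_offset_t *addrp)
{
	*addrp = 0;	/* no fixed address: let vm_mmap choose placement */
	return (vm_mmap(map, addrp, size, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_ALL, MAP_ANON, NULL, 0));
}
#endif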
/*
 * vm_mmap()
 *
 * MPSAFE
 *
 * Internal version of mmap.  Currently used by mmap, exec, and sys5
 * shared memory.  Handle is either a vnode pointer or NULL for MAP_ANON.
 */
int
vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
	vm_prot_t maxprot, int flags,
	void *handle,
	vm_ooffset_t foff)
{
	boolean_t fitit;
	vm_object_t object;
	struct vnode *vp = NULL;
	objtype_t type;
	int rv = KERN_SUCCESS;
	vm_ooffset_t objsize;
	int docow;
	struct thread *td = curthread;
	if (size == 0)
		return (0);

	objsize = size = round_page(size);

	if (td->td_proc->p_vmspace->vm_map.size + size >
	    td->td_proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		return (ENOMEM);
	}

	/*
	 * We currently can only deal with page aligned file offsets.
	 * The check is here rather than in the syscall because the
	 * kernel calls this function internally for other mmapping
	 * operations (such as in exec) and non-aligned offsets will
	 * cause pmap inconsistencies...so we want to be sure to
	 * disallow this in all cases.
	 */
	if (foff & PAGE_MASK)
		return (EINVAL);

	if ((flags & MAP_FIXED) == 0) {
		fitit = TRUE;
		*addr = round_page(*addr);
	} else {
		if (*addr != trunc_page(*addr))
			return (EINVAL);
		fitit = FALSE;
		(void) vm_map_remove(map, *addr, *addr + size);
	}
	/*
	 * Lookup/allocate object.
	 */
	if (flags & MAP_ANON) {
		type = OBJT_DEFAULT;
		/*
		 * Unnamed anonymous regions always start at 0.
		 */
		if (handle == 0)
			foff = 0;
	} else {
		vp = (struct vnode *) handle;
		ASSERT_VOP_LOCKED(vp, "vm_mmap");
		if (vp->v_type == VCHR) {
			type = OBJT_DEVICE;
			handle = (void *)(intptr_t)vp->v_rdev;
		} else {
			struct vattr vat;
			int error;

			error = VOP_GETATTR(vp, &vat, td->td_ucred, td);
			if (error)
				return (error);
			objsize = round_page(vat.va_size);
			type = OBJT_VNODE;
			/*
			 * if it is a regular file without any references
			 * we do not need to sync it.
			 */
			if (vp->v_type == VREG && vat.va_nlink == 0) {
				flags |= MAP_NOSYNC;
			}
		}
	}
	if (handle == NULL) {
		object = NULL;
		docow = 0;
	} else {
		object = vm_pager_allocate(type,
		    handle, objsize, prot, foff);
		if (object == NULL) {
			return (type == OBJT_DEVICE ? EINVAL : ENOMEM);
		}
		docow = MAP_PREFAULT_PARTIAL;
	}

	/*
	 * Force device mappings to be shared.
	 */
	if (type == OBJT_DEVICE || type == OBJT_PHYS) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
	}

	if ((flags & (MAP_ANON|MAP_SHARED)) == 0)
		docow |= MAP_COPY_ON_WRITE;
	if (flags & MAP_NOSYNC)
		docow |= MAP_DISABLE_SYNCER;
	if (flags & MAP_NOCORE)
		docow |= MAP_DISABLE_COREDUMP;
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;

	if (maxprot & VM_PROT_READ)
		maxprot |= VM_PROT_EXECUTE;
#endif

	if (fitit)
		*addr = pmap_addr_hint(object, *addr, size);

	if (flags & MAP_STACK)
		rv = vm_map_stack (map, *addr, size, prot,
		    maxprot, docow);
	else
		rv = vm_map_find(map, object, foff, addr, size, fitit,
		    prot, maxprot, docow);
	if (rv != KERN_SUCCESS) {
		/*
		 * Lose the object reference.  Will destroy the
		 * object if it's an unnamed anonymous mapping
		 * or named anonymous without other references.
		 */
		vm_object_deallocate(object);
	} else if (flags & MAP_SHARED) {
		/*
		 * Shared memory is also shared with children.
		 */
		rv = vm_map_inherit(map, *addr, *addr + size, VM_INHERIT_SHARE);
		if (rv != KERN_SUCCESS)
			(void) vm_map_remove(map, *addr, *addr + size);
	}

	switch (rv) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}