/*-
 * Copyright (c) 2006, 2011 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2) and shm_unlink(2).  While most of the implementation is
 * here, vm_mmap.c contains mapping logic changes.
 *
 * TODO:
 *
 * (1) Need to export data to a userland tool via a sysctl.  Should ipcs(1)
 *     and ipcrm(1) be expanded or should new tools to manage both POSIX
 *     kernel semaphores and POSIX shared memory be written?
 *
 * (2) Add support for this file type to fstat(1).
 *
 * (3) Resource limits?  Does this need its own resource limits or are the
 *     existing limits in mmap(2) sufficient?
 */

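/*
 * For orientation, the intended userland usage is sketched below.  This
 * is an illustrative fragment, not part of the kernel sources; error
 * handling is omitted and the object name "/myshm" is arbitrary.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	... share p with other processes that shm_open("/myshm", ...) ...
 *	shm_unlink("/myshm");
 */
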
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr *shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

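/*
 * hashinit() returns the bucket mask (a power of two minus one) in
 * shm_hash, so SHM_HASH() selects a bucket by masking the 32-bit
 * FNV-1a hash of the path.  A hedged sketch of a lookup:
 *
 *	Fnv32_t fnv = fnv_32_str(path, FNV1_32_INIT);
 *	LIST_FOREACH(map, SHM_HASH(fnv), sm_link)
 *		... compare map->sm_fnv, then map->sm_path ...
 */
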
static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);

static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;

/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = invfo_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	if (uio->uio_rw == UIO_READ && vm_page_lookup(obj, idx) == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
	 * type object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, NULL, NULL);
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_hold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		vm_pager_page_unswapped(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);

	return (error);
}

int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error == 0)
		error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
	foffset_lock_uio(fp, uio, flags);
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	}

	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	shmfd = fp->f_data;
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
	return (shm_dotruncate(shmfd, length));
}

static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	shmfd = fp->f_data;
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);

	/*
	 * Attempt to return sanish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;		/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(object);
					VM_WAIT;
					VM_OBJECT_WLOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL)
					rv = vm_pager_get_pages(object, &m, 1,
					    NULL, NULL);
				else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

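/*
 * Worked example (illustrative, assuming 4KB pages): shrinking a
 * three-page object (shm_size = 12288) to length = 4097 gives
 * nobjsize = OFF_TO_IDX(4097 + PAGE_MASK) = 2, so pages [2, 3) are
 * removed, ptoa(1) = 4096 bytes of swap reservation are released, and
 * bytes [1, 4096) of page 1 are zeroed so a later grow cannot expose
 * stale data.
 */
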
/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;
	int ino;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
	shmfd->shm_object->pg_color = 0;
	VM_OBJECT_WLOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_COLORED | OBJ_NOSPLIT);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	ino = alloc_unr(shm_ino_unr);
	if (ino == -1)
		shmfd->shm_ino = shm_dev_ino;
	else
		shmfd->shm_ino = ino;
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
		mac_posixshm_destroy(shmfd);
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		if (shmfd->shm_ino != 0)
			free_unr(shm_ino_unr, shmfd->shm_ino);
		free(shmfd, M_SHMFD);
	}
}

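/*
 * Reference lifecycle, for orientation (a hedged sketch): shm_alloc()
 * returns an object with one reference, and shm_insert() takes a second
 * via shm_hold() on behalf of the dictionary entry.  shm_unlink(2)
 * drops the dictionary reference and close(2) the descriptor's, so the
 * shmfd is freed only once it is both unlinked and no longer open:
 *
 *	shmfd = shm_alloc(ucred, cmode);	refs == 1 (the file)
 *	shm_insert(path, fnv, shmfd);		refs == 2 (+ dictionary)
 *	shm_remove(path, fnv, ucred);		refs == 1
 *	shm_drop(shmfd);			refs == 0, freed
 */
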
/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred, NULL);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */

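/*
 * The shm_lookup()/shm_insert()/shm_remove() helpers assume the caller
 * holds shm_dict_lock; the expected calling pattern (a hedged sketch of
 * what kern_shm_open() does) is:
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	if (shmfd == NULL) {
 *		shmfd = shm_alloc(ucred, cmode);
 *		shm_insert(path, fnv, shmfd);
 *	}
 *	sx_xunlock(&shm_dict_lock);
 */
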
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	shm_ino_unr = new_unrhdr(1, INT32_MAX, NULL);
	KASSERT(shm_ino_unr != NULL, ("shm fake inodes not initialized"));
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			return (0);
		}
	}

	return (ENOENT);
}

int
kern_shm_open(struct thread *td, const char *userpath, int flags, mode_t mode,
    struct filecaps *fcaps)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	Fnv32_t fnv;
	mode_t cmode;
	int fd, error;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	error = falloc_caps(td, &fp, &fd, O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
	} else {
		path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
		error = copyinstr(userpath, path, MAXPATHLEN, NULL);
#ifdef KTRACE
		if (error == 0 && KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(path);
#endif
		/* Require paths to start with a '/' character. */
		if (error == 0 && path[0] != '/')
			error = EINVAL;
		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			free(path, M_SHMFD);
			return (error);
		}

		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
					shmfd = shm_alloc(td->td_ucred, cmode);
					shm_insert(path, fnv, shmfd);
				}
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);
			if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
				error = EEXIST;
			else {
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
					error = shm_access(shmfd, td->td_ucred,
					    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				error = mac_posixshm_check_truncate(
				    td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
					shm_dotruncate(shmfd, 0);
			}
			if (error == 0)
				shm_hold(shmfd);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}

int
sys_shm_open(struct thread *td, struct shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags, uap->mode, NULL));
}

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
	if (error) {
		free(path, M_TEMP);
		return (error);
	}
#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_TEMP);

	return (error);
}

static int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	if ((fp->f_flag & FWRITE) != 0)
		maxprot |= VM_PROT_WRITE;

	/* Don't permit shared writable mappings on read-only descriptors. */
	if ((flags & MAP_SHARED) != 0 &&
	    (maxprot & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	maxprot &= cap_maxprot;

	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		return (error);

	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (foff >= shmfd->shm_size ||
	    foff + objsize > round_page(shmfd->shm_size))
		return (EINVAL);

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, FALSE, td);
	if (error != 0)
		vm_object_deallocate(shmfd->shm_object);
	return (error);
}

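/*
 * Worked example of the offset validation above (illustrative, 4KB
 * pages): for an object with shm_size = 5000, round_page(shm_size) =
 * 8192, so mapping objsize = 8192 at foff = 0 is accepted, while
 * foff = 4096 with objsize = 8192 fails because 4096 + 8192 > 8192.
 */
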
static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid,
	    shmfd->shm_gid, VADMIN, active_cred, NULL);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */

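/*
 * Example (a hedged sketch of a kernel consumer): to get a wired kernel
 * view of the first page of the object backing file fp, one would do
 * roughly:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, PAGE_SIZE, 0, &mem);
 *	if (error == 0) {
 *		... access PAGE_SIZE bytes at mem ...
 *		error = shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 *
 * Note that shm_dotruncate() refuses to shrink the object while
 * shm_kmappings is nonzero, so such a mapping also pins the size.
 */
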
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	} else
		vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct shmfd *shmfd;

	kif->kf_type = KF_TYPE_SHM;
	shmfd = fp->f_data;

	mtx_lock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	mtx_unlock(&shm_timestamp_lock);
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		sx_slock(&shm_dict_lock);
		if (shmfd->shm_path != NULL)
			strlcpy(kif->kf_path, shmfd->shm_path,
			    sizeof(kif->kf_path));
		sx_sunlock(&shm_dict_lock);
	}
	return (0);
}