/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).  While most of the
 * implementation is here, vm_mmap.c contains mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  A per-uid swap resource limit controls the total amount of
 * memory that a user can consume for anonymous objects, including
 * shared ones.
 */
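
/*
 * Userland view of the interface implemented here (a sketch, not part
 * of the kernel source; assumes <sys/mman.h>, <fcntl.h> and <unistd.h>):
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		if (p != MAP_FAILED) {
 *			p[0] = 1;		(backed by swap, not a file)
 *			munmap(p, 4096);
 *		}
 *		close(fd);
 *		shm_unlink("/example");
 *	}
 */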
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
struct shm_mapping {
	char		*sm_path;
	Fnv32_t		sm_fnv;
	struct shmfd	*sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};
static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])
static void	shm_init(void *arg);
static void	shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int	shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static int	shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int	shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);
static fo_rdwr_t	shm_read;
static fo_rdwr_t	shm_write;
static fo_truncate_t	shm_truncate;
static fo_ioctl_t	shm_ioctl;
static fo_stat_t	shm_stat;
static fo_close_t	shm_close;
static fo_chmod_t	shm_chmod;
static fo_chown_t	shm_chown;
static fo_seek_t	shm_seek;
static fo_fill_kinfo_t	shm_fill_kinfo;
static fo_mmap_t	shm_mmap;
static fo_get_seals_t	shm_get_seals;
static fo_add_seals_t	shm_add_seals;
static fo_fallocate_t	shm_fallocate;
/* File descriptor operations. */
struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_fallocate = shm_fallocate,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

FEATURE(posix_shm, "POSIX shared memory");
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK)
		goto found;

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}
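
	/*
	 * Userland consequence of the path above (sketch): after
	 * ftruncate() extends a fresh object, reading the untouched
	 * range copies out of zero_region and instantiates no pages:
	 *
	 *	ftruncate(fd, 1024 * 1024);	(no pages allocated)
	 *	read(fd, buf, 4096);		(zeroes, still none)
	 */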
	/*
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out this object's pages because it is an
	 * OBJT_SWAP type object.
	 */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		printf("uiomove_object: vm_obj %p idx %jd pager error %d\n",
		    obj, (intmax_t)idx, rv);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}
static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}
static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}
static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;
	off_t size;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
		/*
		 * Overflow is only an error if we're supposed to expand on
		 * write.  Otherwise, we'll just truncate the write to the
		 * size of the file, which can only grow up to OFF_MAX.
		 */
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
			foffset_unlock_uio(fp, uio, flags);
			return (EFBIG);
		}

		size = shmfd->shm_size;
	} else {
		size = uio->uio_offset + uio->uio_resid;
	}
	if ((flags & FOF_OFFSET) == 0) {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
		    &shmfd->shm_mtx);
	} else {
		rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
		    size, &shmfd->shm_mtx);
	}
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
		error = EPERM;
	} else {
		error = 0;
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
		    size > shmfd->shm_size) {
			VM_OBJECT_WLOCK(shmfd->shm_object);
			error = shm_dotruncate_locked(shmfd, size, rl_cookie);
			VM_OBJECT_WUNLOCK(shmfd->shm_object);
		}
		if (error == 0)
			error = uiomove_object(shmfd->shm_object,
			    shmfd->shm_size, uio);
	}
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}
static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}
static int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
		 * just like it would on an unlinked regular file.
		 */
		return (0);
	default:
		return (ENOTTY);
	}
}
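
/*
 * Userland view of the case above (sketch): toggling non-blocking mode
 * on a shmfd succeeds and is a no-op, as it would be for an unlinked
 * regular file:
 *
 *	int fl = fcntl(fd, F_GETFL);
 *	fcntl(fd, F_SETFL, fl | O_NONBLOCK);	(returns 0)
 */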
static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane-ish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	sb->st_blocks = howmany(sb->st_size, sb->st_blksize);
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;

	return (0);
}
static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}
static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	char *path;
	const char *pr_path;
	size_t pr_pathlen;
	int error;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0
	    : strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}
static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
			if (m != NULL) {
				MPASS(vm_page_all_valid(m));
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx,
				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
				if (m == NULL)
					goto retry;
				vm_object_pip_add(object, 1);
				VM_OBJECT_WUNLOCK(object);
				rv = vm_pager_get_pages(object, &m, 1, NULL,
				    NULL);
				VM_OBJECT_WLOCK(object);
				vm_object_pip_wakeup(object);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
				} else {
					vm_page_free(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(vm_page_all_valid(m),
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_set_dirty(m);
				vm_page_xunbusy(m);
			}
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Free the swap accounted for shm. */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}
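
/*
 * Both ftruncate(2) and the SHM_GROW_ON_WRITE path funnel through the
 * routine above; e.g. (userland sketch):
 *
 *	ftruncate(fd, 0);	(trims pages, releases the swap reservation)
 *	ftruncate(fd, 8192);	(reserves swap for the new range)
 */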
int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);
	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_dotruncate_locked(shmfd, length, rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}
/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}
struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}
/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}
/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
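
/*
 * For example, a lookup of "/example" (sketch) computes
 *
 *	Fnv32_t fnv = fnv_32_str("/example", FNV1_32_INIT);
 *
 * and scans the bucket SHM_HASH(fnv) for a matching sm_fnv and path,
 * as shm_lookup() below does.
 */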
static void
shm_init(void *arg)
{

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}
static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}
static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	error = ENOENT;
	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			map->sm_shmfd->shm_path = NULL;
			LIST_REMOVE(map, sm_link);
			shm_drop(map->sm_shmfd);
			free(map->sm_path, M_SHMFD);
			free(map, M_SHMFD);
			break;
		}
	}

	return (error);
}
static int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused)
{
	struct filedesc *fdp;
	struct shmfd *shmfd;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int error, fd, initial_seals;

	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE)) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;

#ifdef CAPABILITY_MODE
	/*
	 * shm_open(2) is only allowed for anonymous objects.
	 */
	if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
		return (ECAPMODE);
#endif

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
		return (EINVAL);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	fdp = td->td_proc->p_fd;
	cmode = (mode & ~fdp->fd_cmask) & ACCESSPERMS;

	/*
	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
	 * by POSIX.  We allow it to be unset here so that an in-kernel
	 * interface may be written as a thin layer around shm, optionally not
	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
	 * in sys_shm_open() to keep this implementation compliant.
	 */
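
	/*
	 * The libc side of that contract looks roughly like this
	 * (sketch, simplified):
	 *
	 *	int
	 *	shm_open(const char *path, int flags, mode_t mode)
	 *	{
	 *		return (shm_open2(path, flags | O_CLOEXEC, mode,
	 *		    0, NULL));
	 *	}
	 */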
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error)
		return (error);

	/* A SHM_ANON path pointer creates an anonymous object. */
	if (userpath == SHM_ANON) {
		/* A read-only anonymous object is pointless. */
		if ((flags & O_ACCMODE) == O_RDONLY) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (EINVAL);
		}
		shmfd = shm_alloc(td->td_ucred, cmode);
		shmfd->shm_seals = initial_seals;
		shmfd->shm_flags = shmflags;
	} else {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}

		AUDIT_ARG_UPATH1_CANON(path);
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode);
					shmfd->shm_seals = initial_seals;
					shmfd->shm_flags = shmflags;
					shm_insert(path, fnv, shmfd);
#ifdef MAC
				}
#endif
			} else {
				free(path, M_SHMFD);
				error = ENOENT;
			}
		} else {
			rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
			    &shmfd->shm_mtx);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * Object already exists, obtain a new
			 * reference if requested and permitted.
			 */
			free(path, M_SHMFD);

			/*
			 * initial_seals can't set additional seals if we've
			 * already been set F_SEAL_SEAL.  If F_SEAL_SEAL is set,
			 * then we've already removed that one from
			 * initial_seals.  This is currently redundant as we
			 * only allow setting F_SEAL_SEAL at creation time, but
			 * it's cheap to check and decreases the effort required
			 * to allow additional seals.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
				error = EINVAL;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
				    td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			rangelock_unlock(&shmfd->shm_rl, rl_cookie,
			    &shmfd->shm_mtx);
		}
		sx_xunlock(&shm_dict_lock);

		if (error) {
			fdclose(td, fp, fd);
			fdrop(fp, td);
			return (error);
		}
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);

	return (0);
}
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
	    uap->mode, NULL));
}
#endif
int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}
int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}
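
	/*
	 * E.g., SHM_RENAME_EXCHANGE atomically swaps the objects behind
	 * two paths (userland sketch):
	 *
	 *	shm_rename("/staging", "/live", SHM_RENAME_EXCHANGE);
	 */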
	/* Renaming to or from anonymous makes no sense. */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op. */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * One of my assumptions failed if ENOENT (e.g. locking didn't
	 * protect us).
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, c'est la vie;
	 * it simply means we have nothing already at path_to to unlink.
	 * That is the ENOENT case.
	 *
	 * If we somehow don't have access to unlink this guy, but
	 * did for the shm at path_from, then relink the shm to path_from
	 * and abort with EACCES.
	 *
	 * All other errors: that is weird; let's relink and abort the
	 * operation.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it. */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it. */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert. */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);

	return (error);
}
static int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
	    &shmfd->shm_mtx);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

	/*
	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writeable.
	 */
	if ((flags & MAP_SHARED) == 0) {
		cap_maxprot |= VM_PROT_WRITE;
		maxprot |= VM_PROT_WRITE;
		writecnt = false;
	} else {
		if ((fp->f_flag & FWRITE) != 0 &&
		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
			maxprot |= VM_PROT_WRITE;

		/*
		 * Any mappings from a writable descriptor may be upgraded to
		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
		 * applied between the open and subsequent mmap(2).  We want to
		 * reject application of a write seal as long as any such
		 * mapping exists so that the seal cannot be trivially bypassed.
		 */
		writecnt = (maxprot & VM_PROT_WRITE) != 0;
		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
			error = EACCES;
			goto out;
		}
	}
	maxprot &= cap_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - objsize) {
		error = EINVAL;
		goto out;
	}

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		goto out;
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	if (writecnt)
		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
	error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
	    shmfd->shm_object, foff, writecnt, td);
	if (error != 0) {
		if (writecnt)
			vm_pager_release_writecount(shmfd->shm_object, 0,
			    objsize);
		vm_object_deallocate(shmfd->shm_object);
	}
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}
static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    VADMIN, active_cred);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}
static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}
/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
int
shm_map(struct file *fp, size_t size, off_t offset, void **memp)
{
	struct shmfd *shmfd;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	obj = shmfd->shm_object;
	VM_OBJECT_WLOCK(obj);
	/*
	 * XXXRW: This validation is probably insufficient, and subject to
	 * sign errors.  It should be fixed.
	 */
	if (offset >= shmfd->shm_size ||
	    offset + size > round_page(shmfd->shm_size)) {
		VM_OBJECT_WUNLOCK(obj);
		return (EINVAL);
	}

	shmfd->shm_kmappings++;
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);

	/* Map the object into the kernel_map and wire it. */
	kva = vm_map_min(kernel_map);
	ofs = offset & PAGE_MASK;
	offset = trunc_page(offset);
	size = round_page(size + ofs);
	rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
	    VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv == KERN_SUCCESS) {
		rv = vm_map_wire(kernel_map, kva, kva + size,
		    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
		if (rv == KERN_SUCCESS) {
			*memp = (void *)(kva + ofs);
			return (0);
		}
		vm_map_remove(kernel_map, kva, kva + size);
	}
	vm_object_deallocate(obj);

	/* On failure, drop our mapping reference. */
	VM_OBJECT_WLOCK(obj);
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (vm_mmap_to_errno(rv));
}
/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);

	return (0);
}
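
/*
 * Typical use of the pair above (kernel-side sketch; fp is a DTYPE_SHM
 * file pointer held by the caller, and len/off are caller-validated):
 *
 *	void *mem;
 *	error = shm_map(fp, len, off, &mem);
 *	if (error == 0) {
 *		memcpy(mem, src, len);
 *		error = shm_unmap(fp, mem, len);
 *	}
 */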
static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}
static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}
static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
	    &shmfd->shm_mtx);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable. */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	return (error);
}
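
/*
 * Userland view of the F_SEAL_WRITE check above (sketch, via the
 * memfd_create(2)/fcntl(2) interface):
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	(fails with EBUSY)
 *	munmap(p, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	(now succeeds)
 */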
static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}
static int
shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	size_t size;
	int error;

	/* This assumes that the caller already checked for overflow. */
	error = 0;
	shmfd = fp->f_data;
	size = offset + len;

	/*
	 * Just grab the rangelock for the range that we may be attempting to
	 * grow, rather than blocking read/write for regions we won't be
	 * touching while this (potential) resize is in progress.  Other
	 * attempts to resize the shmfd will have to take a write lock from 0 to
	 * OFF_MAX, so this being potentially beyond the current usable range of
	 * the shmfd is not necessarily a concern.  If other mechanisms are
	 * added to grow a shmfd, this may need to be re-evaluated.
	 */
	rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
	    &shmfd->shm_mtx);
	if (size > shmfd->shm_size) {
		VM_OBJECT_WLOCK(shmfd->shm_object);
		error = shm_dotruncate_locked(shmfd, size, rl_cookie);
		VM_OBJECT_WUNLOCK(shmfd->shm_object);
	}
	rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
	/* Translate to posix_fallocate(2) return value as needed. */
	if (error == ENOMEM)
		error = EFBIG;
	return (error);
}
static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif;
	u_long i;
	ssize_t curlen;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	curlen = 0;
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM)
				continue;
			if (error != 0)
				break;
			pack_kinfo(&kif);
			if (req->oldptr != NULL &&
			    kif.kf_structsize + curlen > req->oldlen)
				break;
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
			curlen += kif.kf_structsize;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");
int
kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
    struct filecaps *caps)
{

	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
}
/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt in for CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but
 * could potentially be exported later via some interface for debugging
 * purposes.  From the kernel's perspective, it is optional.  Individual
 * consumers like memfd_create() may require it in order to be compatible
 * with other systems implementing the same function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, NULL, uap->name));
}
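
/*
 * For instance, libc's memfd_create(3) is built on this entry point,
 * roughly (sketch, details omitted):
 *
 *	fd = shm_open2(SHM_ANON, oflags | O_CLOEXEC, 0, shmflags, name);
 */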