/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of the memory
 * objects.  The per-uid swap resource limit controls the total amount of
 * memory that a user can consume for anonymous objects, including
 * shared ones.
 */
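
/*
 * Illustrative userland usage (a sketch only; "/example" is a made-up
 * name and is not part of this file):
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	...
 *	shm_unlink("/example");
 */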
#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/signal.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
108 struct shmfd *sm_shmfd;
109 LIST_ENTRY(shm_mapping) sm_link;
112 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
113 static LIST_HEAD(, shm_mapping) *shm_dictionary;
114 static struct sx shm_dict_lock;
115 static struct mtx shm_timestamp_lock;
116 static u_long shm_hash;
117 static struct unrhdr64 shm_ino_unr;
118 static dev_t shm_dev_ino;
120 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash])
122 static void shm_init(void *arg);
123 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
124 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
125 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
126 static void shm_doremove(struct shm_mapping *map);
127 static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
129 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
131 static int shm_copyin_path(struct thread *td, const char *userpath_in,
133 static int shm_deallocate(struct shmfd *shmfd, off_t *offset,
134 off_t *length, int flags);
136 static fo_rdwr_t shm_read;
137 static fo_rdwr_t shm_write;
138 static fo_truncate_t shm_truncate;
139 static fo_ioctl_t shm_ioctl;
140 static fo_stat_t shm_stat;
141 static fo_close_t shm_close;
142 static fo_chmod_t shm_chmod;
143 static fo_chown_t shm_chown;
144 static fo_seek_t shm_seek;
145 static fo_fill_kinfo_t shm_fill_kinfo;
146 static fo_mmap_t shm_mmap;
147 static fo_get_seals_t shm_get_seals;
148 static fo_add_seals_t shm_add_seals;
149 static fo_fallocate_t shm_fallocate;
150 static fo_fspacectl_t shm_fspacectl;
152 /* File descriptor operations. */
153 struct fileops shm_ops = {
155 .fo_write = shm_write,
156 .fo_truncate = shm_truncate,
157 .fo_ioctl = shm_ioctl,
158 .fo_poll = invfo_poll,
159 .fo_kqfilter = invfo_kqfilter,
161 .fo_close = shm_close,
162 .fo_chmod = shm_chmod,
163 .fo_chown = shm_chown,
164 .fo_sendfile = vn_sendfile,
166 .fo_fill_kinfo = shm_fill_kinfo,
168 .fo_get_seals = shm_get_seals,
169 .fo_add_seals = shm_add_seals,
170 .fo_fallocate = shm_fallocate,
171 .fo_fspacectl = shm_fspacectl,
172 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
175 FEATURE(posix_shm, "POSIX shared memory");
177 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
180 static int largepage_reclaim_tries = 1;
181 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
182 CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
183 "Number of contig reclaims before giving up for default alloc policy");
185 #define shm_rangelock_unlock(shmfd, cookie) \
186 rangelock_unlock(&(shmfd)->shm_rl, (cookie), &(shmfd)->shm_mtx)
187 #define shm_rangelock_rlock(shmfd, start, end) \
188 rangelock_rlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
189 #define shm_rangelock_tryrlock(shmfd, start, end) \
190 rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
191 #define shm_rangelock_wlock(shmfd, start, end) \
192 rangelock_wlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
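
/*
 * A condensed sketch of how the wrappers above are used by the fo_*
 * methods later in this file (not new functionality, just the pattern):
 *
 *	void *rl_cookie;
 *
 *	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
 *	... inspect or resize the object ...
 *	shm_rangelock_unlock(shmfd, rl_cookie);
 */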
195 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
200 int error, offset, rv;
202 idx = OFF_TO_IDX(uio->uio_offset);
203 offset = uio->uio_offset & PAGE_MASK;
204 tlen = MIN(PAGE_SIZE - offset, len);
206 rv = vm_page_grab_valid_unlocked(&m, obj, idx,
207 VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
208 if (rv == VM_PAGER_OK)
	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
216 VM_OBJECT_WLOCK(obj);
217 m = vm_page_lookup(obj, idx);
218 if (uio->uio_rw == UIO_READ && m == NULL &&
219 !vm_pager_has_page(obj, idx, NULL, NULL)) {
220 VM_OBJECT_WUNLOCK(obj);
221 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	/*
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out tobj's pages because tobj is an OBJT_SWAP
	 * type object.
	 */
231 rv = vm_page_grab_valid(&m, obj, idx,
232 VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
233 if (rv != VM_PAGER_OK) {
234 VM_OBJECT_WUNLOCK(obj);
236 printf("uiomove_object: vm_obj %p idx %jd "
237 "pager error %d\n", obj, idx, rv);
239 return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
241 VM_OBJECT_WUNLOCK(obj);
244 error = uiomove_fromphys(&m, offset, tlen, uio);
245 if (uio->uio_rw == UIO_WRITE && error == 0)
246 vm_page_set_dirty(m);
254 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
261 while ((resid = uio->uio_resid) > 0) {
262 if (obj_size <= uio->uio_offset)
264 len = MIN(obj_size - uio->uio_offset, resid);
267 error = uiomove_object_page(obj, len, uio);
268 if (error != 0 || resid == uio->uio_resid)
274 static u_long count_largepages[MAXPAGESIZES];
277 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
278 int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
280 vm_page_t m __diagused;
283 psind = object->un_pager.phys.data_val;
284 if (psind == 0 || pidx >= object->size)
285 return (VM_PAGER_FAIL);
286 *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
	/*
	 * We only busy the first page in the superpage run.  It is
	 * useless to busy the whole run since we only remove full
	 * superpages, and it takes too long to busy e.g. 512 * 512 ==
	 * 262144 pages constituting a 1G amd64 superpage.
	 */
294 m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
297 *last = *first + atop(pagesizes[psind]) - 1;
298 return (VM_PAGER_OK);
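	/*
	 * Worked example (illustrative numbers only): with 4K base pages
	 * and psind selecting 2M superpages, pagesizes[psind] / PAGE_SIZE
	 * is 512, so a fault at pidx 1000 yields
	 *
	 *	*first = rounddown2(1000, 512) = 512
	 *	*last  = 512 + 512 - 1        = 1023
	 *
	 * i.e. the whole 2M run backing that index is reported at once.
	 */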
302 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
303 int *before, int *after)
307 psind = object->un_pager.phys.data_val;
308 if (psind == 0 || pindex >= object->size)
310 if (before != NULL) {
311 *before = pindex - rounddown2(pindex, pagesizes[psind] /
315 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
322 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
323 vm_ooffset_t foff, struct ucred *cred)
328 shm_largepage_phys_dtor(vm_object_t object)
332 psind = object->un_pager.phys.data_val;
334 atomic_subtract_long(&count_largepages[psind],
335 object->size / (pagesizes[psind] / PAGE_SIZE));
336 vm_wire_sub(object->size);
		KASSERT(object->size == 0,
		    ("largepage phys obj %p not initialized but size %#jx > 0",
		    object, (uintmax_t)object->size));
344 static const struct phys_pager_ops shm_largepage_phys_ops = {
345 .phys_pg_populate = shm_largepage_phys_populate,
346 .phys_pg_haspage = shm_largepage_phys_haspage,
347 .phys_pg_ctor = shm_largepage_phys_ctor,
348 .phys_pg_dtor = shm_largepage_phys_dtor,
352 shm_largepage(struct shmfd *shmfd)
354 return (shmfd->shm_object->type == OBJT_PHYS);
358 shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
363 swap_pager_freespace(obj, start, size, &c);
367 shm = obj->un_pager.swp.swp_priv;
370 KASSERT(shm->shm_pages >= c,
371 ("shm %p pages %jd free %jd", shm,
372 (uintmax_t)shm->shm_pages, (uintmax_t)c));
377 shm_page_inserted(vm_object_t obj, vm_page_t m)
381 shm = obj->un_pager.swp.swp_priv;
384 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
389 shm_page_removed(vm_object_t obj, vm_page_t m)
393 shm = obj->un_pager.swp.swp_priv;
396 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
397 KASSERT(shm->shm_pages >= 1,
398 ("shm %p pages %jd free 1", shm,
399 (uintmax_t)shm->shm_pages));
404 static struct pagerops shm_swap_pager_ops = {
405 .pgo_kvme_type = KVME_TYPE_SWAP,
406 .pgo_freespace = shm_pager_freespace,
407 .pgo_page_inserted = shm_page_inserted,
408 .pgo_page_removed = shm_page_removed,
410 static int shmfd_pager_type = -1;
413 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
420 foffset = foffset_lock(fp, 0);
425 (offset > 0 && foffset > OFF_MAX - offset)) {
432 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
436 offset += shmfd->shm_size;
444 if (offset < 0 || offset > shmfd->shm_size)
447 td->td_uretoff.tdu_off = offset;
449 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
454 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
455 int flags, struct thread *td)
463 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
467 foffset_lock_uio(fp, uio, flags);
468 rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
469 uio->uio_offset + uio->uio_resid);
470 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
471 shm_rangelock_unlock(shmfd, rl_cookie);
472 foffset_unlock_uio(fp, uio, flags);
477 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
478 int flags, struct thread *td)
487 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
491 if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
493 foffset_lock_uio(fp, uio, flags);
494 if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
496 * Overflow is only an error if we're supposed to expand on
497 * write. Otherwise, we'll just truncate the write to the
498 * size of the file, which can only grow up to OFF_MAX.
500 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
501 foffset_unlock_uio(fp, uio, flags);
505 size = shmfd->shm_size;
507 size = uio->uio_offset + uio->uio_resid;
509 if ((flags & FOF_OFFSET) == 0)
510 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
512 rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
513 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
517 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
518 size > shmfd->shm_size) {
519 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
522 error = uiomove_object(shmfd->shm_object,
523 shmfd->shm_size, uio);
525 shm_rangelock_unlock(shmfd, rl_cookie);
526 foffset_unlock_uio(fp, uio, flags);
531 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
541 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
545 return (shm_dotruncate(shmfd, length));
549 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
553 struct shm_largepage_conf *conf;
561 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
562 * just like it would on an unlinked regular file
566 if (!shm_largepage(shmfd))
569 if (shmfd->shm_lp_psind != 0 &&
570 conf->psind != shmfd->shm_lp_psind)
572 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
573 pagesizes[conf->psind] == 0)
575 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
576 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
577 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
580 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
581 shmfd->shm_lp_psind = conf->psind;
582 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
583 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
584 shm_rangelock_unlock(shmfd, rl_cookie);
587 if (!shm_largepage(shmfd))
590 rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
591 conf->psind = shmfd->shm_lp_psind;
592 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
593 shm_rangelock_unlock(shmfd, rl_cookie);
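
/*
 * Userland configuration sketch for the ioctl handled above (a hedged
 * illustration; it assumes the libc shm_create_largepage(3) wrapper,
 * which creates the descriptor with SHM_LARGEPAGE and then issues the
 * set-configuration ioctl; the 2M size is only an example):
 *
 *	int fd = shm_create_largepage(SHM_ANON, O_CREAT | O_RDWR,
 *	    1, SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
 *	ftruncate(fd, 2 * 1024 * 1024);
 *
 * The third argument is the pagesizes[] index (psind) selected above.
 */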
601 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
611 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	/*
	 * Attempt to return sane-ish values for fstat() on a memory file
	 * descriptor.
	 */
620 bzero(sb, sizeof(*sb));
621 sb->st_blksize = PAGE_SIZE;
622 sb->st_size = shmfd->shm_size;
623 mtx_lock(&shm_timestamp_lock);
624 sb->st_atim = shmfd->shm_atime;
625 sb->st_ctim = shmfd->shm_ctime;
626 sb->st_mtim = shmfd->shm_mtime;
627 sb->st_birthtim = shmfd->shm_birthtime;
628 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */
629 sb->st_uid = shmfd->shm_uid;
630 sb->st_gid = shmfd->shm_gid;
631 mtx_unlock(&shm_timestamp_lock);
632 sb->st_dev = shm_dev_ino;
633 sb->st_ino = shmfd->shm_ino;
634 sb->st_nlink = shmfd->shm_object->ref_count;
635 if (shm_largepage(shmfd)) {
636 sb->st_blocks = shmfd->shm_object->size /
637 (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
639 sb->st_blocks = shmfd->shm_pages;
646 shm_close(struct file *fp, struct thread *td)
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
664 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
665 pr_path = td->td_ucred->cr_prison->pr_path;
667 /* Construct a full pathname for jailed callers. */
668 pr_pathlen = strcmp(pr_path, "/") ==
669 0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
670 error = copyinstr(userpath_in, path + pr_pathlen,
671 MAXPATHLEN - pr_pathlen, NULL);
676 if (KTRPOINT(curthread, KTR_NAMEI))
680 /* Require paths to start with a '/' character. */
681 if (path[pr_pathlen] != '/') {
696 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
702 VM_OBJECT_ASSERT_WLOCKED(object);
703 KASSERT(base >= 0, ("%s: base %d", __func__, base));
704 KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
708 m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
710 MPASS(vm_page_all_valid(m));
711 } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
712 m = vm_page_alloc(object, idx,
713 VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
716 vm_object_pip_add(object, 1);
717 VM_OBJECT_WUNLOCK(object);
718 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
719 VM_OBJECT_WLOCK(object);
720 vm_object_pip_wakeup(object);
721 if (rv == VM_PAGER_OK) {
723 * Since the page was not resident, and therefore not
724 * recently accessed, immediately enqueue it for
725 * asynchronous laundering. The current operation is
726 * not regarded as an access.
731 VM_OBJECT_WUNLOCK(object);
736 pmap_zero_page_area(m, base, end - base);
737 KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
739 vm_page_set_dirty(m);
747 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
750 vm_pindex_t nobjsize;
754 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
755 object = shmfd->shm_object;
756 VM_OBJECT_ASSERT_WLOCKED(object);
757 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
758 if (length == shmfd->shm_size)
760 nobjsize = OFF_TO_IDX(length + PAGE_MASK);
762 /* Are we shrinking? If so, trim the end. */
763 if (length < shmfd->shm_size) {
764 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
768 * Disallow any requests to shrink the size if this
769 * object is mapped into the kernel.
771 if (shmfd->shm_kmappings > 0)
775 * Zero the truncated part of the last page.
777 base = length & PAGE_MASK;
779 error = shm_partial_page_invalidate(object,
780 OFF_TO_IDX(length), base, PAGE_SIZE);
784 delta = IDX_TO_OFF(object->size - nobjsize);
786 if (nobjsize < object->size)
787 vm_object_page_remove(object, nobjsize, object->size,
790 /* Free the swap accounted for shm */
791 swap_release_by_cred(delta, object->cred);
792 object->charge -= delta;
794 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
797 /* Try to reserve additional swap space. */
798 delta = IDX_TO_OFF(nobjsize - object->size);
799 if (!swap_reserve_by_cred(delta, object->cred))
801 object->charge += delta;
803 shmfd->shm_size = length;
804 mtx_lock(&shm_timestamp_lock);
805 vfs_timestamp(&shmfd->shm_ctime);
806 shmfd->shm_mtime = shmfd->shm_ctime;
807 mtx_unlock(&shm_timestamp_lock);
808 object->size = nobjsize;
813 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
817 vm_pindex_t newobjsz;
818 vm_pindex_t oldobjsz __unused;
819 int aflags, error, i, psind, try;
821 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
822 object = shmfd->shm_object;
823 VM_OBJECT_ASSERT_WLOCKED(object);
824 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
826 oldobjsz = object->size;
827 newobjsz = OFF_TO_IDX(length);
828 if (length == shmfd->shm_size)
830 psind = shmfd->shm_lp_psind;
831 if (psind == 0 && length != 0)
833 if ((length & (pagesizes[psind] - 1)) != 0)
836 if (length < shmfd->shm_size) {
837 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
839 if (shmfd->shm_kmappings > 0)
841 return (ENOTSUP); /* Pages are unmanaged. */
843 vm_object_page_remove(object, newobjsz, oldobjsz, 0);
844 object->size = newobjsz;
845 shmfd->shm_size = length;
850 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
853 aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
854 if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
855 aflags |= VM_ALLOC_WAITFAIL;
	/*
	 * Extend shmfd and object, keeping all already fully
	 * allocated large pages intact even on error, because the dropped
	 * object lock might have allowed them to be mapped.
	 */
863 while (object->size < newobjsz) {
864 m = vm_page_alloc_contig(object, object->size, aflags,
865 pagesizes[psind] / PAGE_SIZE, 0, ~0,
869 VM_OBJECT_WUNLOCK(object);
870 if (shmfd->shm_lp_alloc_policy ==
871 SHM_LARGEPAGE_ALLOC_NOWAIT ||
872 (shmfd->shm_lp_alloc_policy ==
873 SHM_LARGEPAGE_ALLOC_DEFAULT &&
874 try >= largepage_reclaim_tries)) {
875 VM_OBJECT_WLOCK(object);
878 error = vm_page_reclaim_contig(aflags,
879 pagesizes[psind] / PAGE_SIZE, 0, ~0,
880 pagesizes[psind], 0);
882 error = vm_wait_intr(object);
884 VM_OBJECT_WLOCK(object);
888 VM_OBJECT_WLOCK(object);
892 for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
893 if ((m[i].flags & PG_ZERO) == 0)
894 pmap_zero_page(&m[i]);
895 vm_page_valid(&m[i]);
896 vm_page_xunbusy(&m[i]);
898 object->size += OFF_TO_IDX(pagesizes[psind]);
899 shmfd->shm_size += pagesizes[psind];
900 atomic_add_long(&count_largepages[psind], 1);
901 vm_wire_add(atop(pagesizes[psind]));
907 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
911 VM_OBJECT_WLOCK(shmfd->shm_object);
912 error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
913 length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
915 VM_OBJECT_WUNLOCK(shmfd->shm_object);
920 shm_dotruncate(struct shmfd *shmfd, off_t length)
925 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
926 error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
927 shm_rangelock_unlock(shmfd, rl_cookie);
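
/*
 * Reached via ftruncate(2) on a shm descriptor.  A hedged userland
 * sketch of how the seal checks above surface (requires a descriptor
 * created with sealing allowed, e.g. via memfd_create(3)):
 *
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW);
 *	ftruncate(fd, size + 4096);	now fails (growth sealed)
 *	ftruncate(fd, size - 4096);	still allowed until F_SEAL_SHRINK
 */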
/*
 * shmfd object management including creation and reference counting
 * routines.
 */
936 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
941 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
943 shmfd->shm_uid = ucred->cr_uid;
944 shmfd->shm_gid = ucred->cr_gid;
945 shmfd->shm_mode = mode;
947 shmfd->shm_object = phys_pager_allocate(NULL,
948 &shm_largepage_phys_ops, NULL, shmfd->shm_size,
949 VM_PROT_DEFAULT, 0, ucred);
950 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
952 obj = vm_pager_allocate(shmfd_pager_type, NULL,
953 shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
954 VM_OBJECT_WLOCK(obj);
955 obj->un_pager.swp.swp_priv = shmfd;
956 VM_OBJECT_WUNLOCK(obj);
957 shmfd->shm_object = obj;
959 KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
960 vfs_timestamp(&shmfd->shm_birthtime);
961 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
962 shmfd->shm_birthtime;
963 shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
964 refcount_init(&shmfd->shm_refs, 1);
965 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
966 rangelock_init(&shmfd->shm_rl);
968 mac_posixshm_init(shmfd);
969 mac_posixshm_create(ucred, shmfd);
976 shm_hold(struct shmfd *shmfd)
979 refcount_acquire(&shmfd->shm_refs);
984 shm_drop(struct shmfd *shmfd)
988 if (refcount_release(&shmfd->shm_refs)) {
990 mac_posixshm_destroy(shmfd);
992 rangelock_destroy(&shmfd->shm_rl);
993 mtx_destroy(&shmfd->shm_mtx);
994 obj = shmfd->shm_object;
995 if (!shm_largepage(shmfd)) {
996 VM_OBJECT_WLOCK(obj);
997 obj->un_pager.swp.swp_priv = NULL;
998 VM_OBJECT_WUNLOCK(obj);
1000 vm_object_deallocate(obj);
1001 free(shmfd, M_SHMFD);
1006 * Determine if the credentials have sufficient permissions for a
1007 * specified combination of FREAD and FWRITE.
1010 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
1020 mtx_lock(&shm_timestamp_lock);
1021 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1023 mtx_unlock(&shm_timestamp_lock);
1033 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
1034 sx_init(&shm_dict_lock, "shm dictionary");
1035 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
1036 new_unrhdr64(&shm_ino_unr, 1);
1037 shm_dev_ino = devfs_alloc_cdp_inode();
1038 KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
1039 shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
1041 MPASS(shmfd_pager_type != -1);
1043 for (i = 1; i < MAXPAGESIZES; i++) {
1044 if (pagesizes[i] == 0)
1046 #define M (1024 * 1024)
1047 #define G (1024 * M)
1048 if (pagesizes[i] >= G)
1049 snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
1050 else if (pagesizes[i] >= M)
1051 snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
1053 snprintf(name, sizeof(name), "%lu", pagesizes[i]);
1056 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
1057 OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
1058 "number of non-transient largepages allocated");
1061 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
1064 * Remove all shared memory objects that belong to a prison.
1067 shm_remove_prison(struct prison *pr)
1069 struct shm_mapping *shmm, *tshmm;
1072 sx_xlock(&shm_dict_lock);
1073 for (i = 0; i < shm_hash + 1; i++) {
1074 LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1075 if (shmm->sm_shmfd->shm_object->cred &&
1076 shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1080 sx_xunlock(&shm_dict_lock);
/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
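
/*
 * Lookup sketch (the pattern kern_shm_open2() and sys_shm_unlink()
 * below follow):
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	...
 *	sx_xunlock(&shm_dict_lock);
 */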
1088 static struct shmfd *
1089 shm_lookup(char *path, Fnv32_t fnv)
1091 struct shm_mapping *map;
1093 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1094 if (map->sm_fnv != fnv)
1096 if (strcmp(map->sm_path, path) == 0)
1097 return (map->sm_shmfd);
1104 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1106 struct shm_mapping *map;
1108 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1109 map->sm_path = path;
1111 map->sm_shmfd = shm_hold(shmfd);
1112 shmfd->shm_path = path;
1113 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1117 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1119 struct shm_mapping *map;
1122 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1123 if (map->sm_fnv != fnv)
1125 if (strcmp(map->sm_path, path) == 0) {
1127 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1131 error = shm_access(map->sm_shmfd, ucred,
1144 shm_doremove(struct shm_mapping *map)
1146 map->sm_shmfd->shm_path = NULL;
1147 LIST_REMOVE(map, sm_link);
1148 shm_drop(map->sm_shmfd);
1149 free(map->sm_path, M_SHMFD);
1154 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1155 int shmflags, struct filecaps *fcaps, const char *name __unused)
1157 struct pwddesc *pdp;
1158 struct shmfd *shmfd;
1164 int error, fd, initial_seals;
1167 if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1168 SHM_LARGEPAGE)) != 0)
1171 initial_seals = F_SEAL_SEAL;
1172 if ((shmflags & SHM_ALLOW_SEALING) != 0)
1173 initial_seals &= ~F_SEAL_SEAL;
1175 #ifdef CAPABILITY_MODE
1177 * shm_open(2) is only allowed for anonymous objects.
1179 if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
1183 AUDIT_ARG_FFLAGS(flags);
1184 AUDIT_ARG_MODE(mode);
1186 if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1189 if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1192 largepage = (shmflags & SHM_LARGEPAGE) != 0;
1193 if (largepage && !PMAP_HAS_LARGEPAGES)
1197 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1198 * If the decision is made later to allow additional seals, care must be
1199 * taken below to ensure that the seals are properly set if the shmfd
1200 * already existed -- this currently assumes that only F_SEAL_SEAL can
1201 * be set and doesn't take further precautions to ensure the validity of
1202 * the seals being added with respect to current mappings.
1204 if ((initial_seals & ~F_SEAL_SEAL) != 0)
1207 pdp = td->td_proc->p_pd;
1208 cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1211 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
1212 * by POSIX. We allow it to be unset here so that an in-kernel
1213 * interface may be written as a thin layer around shm, optionally not
1214 * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally
1215 * in sys_shm_open() to keep this implementation compliant.
1217 error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1221 /* A SHM_ANON path pointer creates an anonymous object. */
1222 if (userpath == SHM_ANON) {
1223 /* A read-only anonymous object is pointless. */
1224 if ((flags & O_ACCMODE) == O_RDONLY) {
1225 fdclose(td, fp, fd);
1229 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1230 shmfd->shm_seals = initial_seals;
1231 shmfd->shm_flags = shmflags;
1233 error = shm_copyin_path(td, userpath, &path);
1235 fdclose(td, fp, fd);
1240 AUDIT_ARG_UPATH1_CANON(path);
1241 fnv = fnv_32_str(path, FNV1_32_INIT);
1242 sx_xlock(&shm_dict_lock);
1243 shmfd = shm_lookup(path, fnv);
1244 if (shmfd == NULL) {
1245 /* Object does not yet exist, create it if requested. */
1246 if (flags & O_CREAT) {
1248 error = mac_posixshm_check_create(td->td_ucred,
1252 shmfd = shm_alloc(td->td_ucred, cmode,
1254 shmfd->shm_seals = initial_seals;
1255 shmfd->shm_flags = shmflags;
1256 shm_insert(path, fnv, shmfd);
1261 free(path, M_SHMFD);
1265 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
1275 initial_seals &= ~shmfd->shm_seals;
1278 * Object already exists, obtain a new
1279 * reference if requested and permitted.
1281 free(path, M_SHMFD);
			/*
			 * initial_seals can't set additional seals if
			 * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL
			 * is set, then we've already removed that one from
			 * initial_seals.  This is currently redundant as we
			 * only allow setting F_SEAL_SEAL at creation time, but
			 * it's cheap to check and decreases the effort
			 * required to allow additional seals.
			 */
1292 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1295 else if ((flags & (O_CREAT | O_EXCL)) ==
1298 else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1302 error = mac_posixshm_check_open(td->td_ucred,
1303 shmfd, FFLAGS(flags & O_ACCMODE));
1306 error = shm_access(shmfd, td->td_ucred,
1307 FFLAGS(flags & O_ACCMODE));
1311 * Truncate the file back to zero length if
1312 * O_TRUNC was specified and the object was
1313 * opened with read/write.
1316 (flags & (O_ACCMODE | O_TRUNC)) ==
1317 (O_RDWR | O_TRUNC)) {
1318 VM_OBJECT_WLOCK(shmfd->shm_object);
1320 error = mac_posixshm_check_truncate(
1321 td->td_ucred, fp->f_cred, shmfd);
1324 error = shm_dotruncate_locked(shmfd, 0,
1326 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1330 * Currently we only allow F_SEAL_SEAL to be
1331 * set initially. As noted above, this would
1332 * need to be reworked should that change.
1334 shmfd->shm_seals |= initial_seals;
1337 shm_rangelock_unlock(shmfd, rl_cookie);
1339 sx_xunlock(&shm_dict_lock);
1342 fdclose(td, fp, fd);
1348 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1350 td->td_retval[0] = fd;
1357 #ifdef COMPAT_FREEBSD12
1359 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1362 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1368 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1374 error = shm_copyin_path(td, uap->path, &path);
1378 AUDIT_ARG_UPATH1_CANON(path);
1379 fnv = fnv_32_str(path, FNV1_32_INIT);
1380 sx_xlock(&shm_dict_lock);
1381 error = shm_remove(path, fnv, td->td_ucred);
1382 sx_xunlock(&shm_dict_lock);
1383 free(path, M_SHMFD);
1389 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1391 char *path_from = NULL, *path_to = NULL;
1392 Fnv32_t fnv_from, fnv_to;
1393 struct shmfd *fd_from;
1394 struct shmfd *fd_to;
1399 AUDIT_ARG_FFLAGS(flags);
1402 * Make sure the user passed only valid flags.
1403 * If you add a new flag, please add a new term here.
1406 SHM_RENAME_NOREPLACE |
1414 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1415 * force the user to choose one or the other.
1417 if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1418 (flags & SHM_RENAME_EXCHANGE) != 0) {
1423 /* Renaming to or from anonymous makes no sense */
1424 if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1429 error = shm_copyin_path(td, uap->path_from, &path_from);
1433 error = shm_copyin_path(td, uap->path_to, &path_to);
1437 AUDIT_ARG_UPATH1_CANON(path_from);
1438 AUDIT_ARG_UPATH2_CANON(path_to);
1440 /* Rename with from/to equal is a no-op */
1441 if (strcmp(path_from, path_to) == 0)
1444 fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1445 fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1447 sx_xlock(&shm_dict_lock);
1449 fd_from = shm_lookup(path_from, fnv_from);
1450 if (fd_from == NULL) {
1455 fd_to = shm_lookup(path_to, fnv_to);
1456 if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
1466 error = shm_remove(path_from, fnv_from, td->td_ucred);
1469 * One of my assumptions failed if ENOENT (e.g. locking didn't
1472 KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1480 * If we are exchanging, we need to ensure the shm_remove below
1481 * doesn't invalidate the dest shm's state.
1483 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1487 * NOTE: if path_to is not already in the hash, c'est la vie;
1488 * it simply means we have nothing already at path_to to unlink.
1489 * That is the ENOENT case.
1491 * If we somehow don't have access to unlink this guy, but
1492 * did for the shm at path_from, then relink the shm to path_from
1493 * and abort with EACCES.
1495 * All other errors: that is weird; let's relink and abort the
1498 error = shm_remove(path_to, fnv_to, td->td_ucred);
1499 if (error != 0 && error != ENOENT) {
1500 shm_insert(path_from, fnv_from, fd_from);
1502 /* Don't free path_from now, since the hash references it */
1509 shm_insert(path_to, fnv_to, fd_from);
1511 /* Don't free path_to now, since the hash references it */
1514 /* We kept a ref when we removed, and incremented again in insert */
1516 KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1517 fd_from->shm_refs));
1519 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1520 shm_insert(path_from, fnv_from, fd_to);
1523 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1528 sx_xunlock(&shm_dict_lock);
1531 free(path_from, M_SHMFD);
1532 free(path_to, M_SHMFD);
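
/*
 * shm_rename(2) usage sketch (names are illustrative):
 *
 *	shm_rename("/from", "/to", 0);			  replaces "/to"
 *	shm_rename("/from", "/to", SHM_RENAME_NOREPLACE);  fails if "/to" exists
 *	shm_rename("/a", "/b", SHM_RENAME_EXCHANGE);	  atomically swaps both
 */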
1537 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1538 vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1539 vm_ooffset_t foff, struct thread *td)
1541 struct vmspace *vms;
1542 vm_map_entry_t next_entry, prev_entry;
1543 vm_offset_t align, mask, maxaddr;
1544 int docow, error, rv, try;
1547 if (shmfd->shm_lp_psind == 0)
1550 /* MAP_PRIVATE is disabled */
1551 if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1552 MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
1555 vms = td->td_proc->p_vmspace;
1556 curmap = map == &vms->vm_map;
1558 error = kern_mmap_racct_check(td, map, size);
1563 docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1564 docow |= MAP_INHERIT_SHARE;
1565 if ((flags & MAP_NOCORE) != 0)
1566 docow |= MAP_DISABLE_COREDUMP;
1568 mask = pagesizes[shmfd->shm_lp_psind] - 1;
1569 if ((foff & mask) != 0)
1571 maxaddr = vm_map_max(map);
1572 if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1573 maxaddr = MAP_32BIT_MAX_ADDR;
1574 if (size == 0 || (size & mask) != 0 ||
1575 (*addr != 0 && ((*addr & mask) != 0 ||
1576 *addr + size < *addr || *addr + size > maxaddr)))
1579 align = flags & MAP_ALIGNMENT_MASK;
1581 align = pagesizes[shmfd->shm_lp_psind];
1582 } else if (align == MAP_ALIGNED_SUPER) {
1583 if (shmfd->shm_lp_psind != 1)
1585 align = pagesizes[1];
1587 align >>= MAP_ALIGNMENT_SHIFT;
1588 align = 1ULL << align;
1589 /* Also handles overflow. */
1590 if (align < pagesizes[shmfd->shm_lp_psind])
1595 if ((flags & MAP_FIXED) == 0) {
1597 if (curmap && (*addr == 0 ||
1598 (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1599 *addr < round_page((vm_offset_t)vms->vm_daddr +
1600 lim_max(td, RLIMIT_DATA))))) {
1601 *addr = roundup2((vm_offset_t)vms->vm_daddr +
1602 lim_max(td, RLIMIT_DATA),
1603 pagesizes[shmfd->shm_lp_psind]);
1606 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1607 if (rv != KERN_SUCCESS) {
1610 *addr = vm_map_min(map);
				if ((*addr & mask) != 0)
					*addr = (*addr + mask) & ~mask;
1617 } else if ((flags & MAP_EXCL) == 0) {
1618 rv = vm_map_delete(map, *addr, *addr + size);
1619 if (rv != KERN_SUCCESS)
1623 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1625 next_entry = vm_map_entry_succ(prev_entry);
1626 if (next_entry->start < *addr + size)
1630 rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1631 prot, max_prot, docow);
1633 error = vm_mmap_to_errno(rv);
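
/*
 * Alignment decoding example for the MAP_ALIGNED() handling above
 * (illustrative numbers): MAP_ALIGNED(30) decodes to align = 1 << 30
 * (1G), which is accepted for a 2M-superpage shmfd, while
 * MAP_ALIGNED(12) (4K) is smaller than the superpage size and is
 * rejected.
 */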
1640 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1641 vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1642 vm_ooffset_t foff, struct thread *td)
1644 struct shmfd *shmfd;
1651 maxprot = VM_PROT_NONE;
1653 rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1654 /* FREAD should always be set. */
1655 if ((fp->f_flag & FREAD) != 0)
1656 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
	/*
	 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writeable.
	 */
1663 if ((flags & MAP_SHARED) == 0) {
1664 cap_maxprot |= VM_PROT_WRITE;
1665 maxprot |= VM_PROT_WRITE;
1668 if ((fp->f_flag & FWRITE) != 0 &&
1669 (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1670 maxprot |= VM_PROT_WRITE;
1673 * Any mappings from a writable descriptor may be upgraded to
1674 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1675 * applied between the open and subsequent mmap(2). We want to
1676 * reject application of a write seal as long as any such
1677 * mapping exists so that the seal cannot be trivially bypassed.
1679 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1680 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1685 maxprot &= cap_maxprot;
1687 /* See comment in vn_mmap(). */
1690 objsize > OFF_MAX ||
1692 foff > OFF_MAX - objsize) {
1698 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1703 mtx_lock(&shm_timestamp_lock);
1704 vfs_timestamp(&shmfd->shm_atime);
1705 mtx_unlock(&shm_timestamp_lock);
1706 vm_object_reference(shmfd->shm_object);
1708 if (shm_largepage(shmfd)) {
1710 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1711 maxprot, flags, foff, td);
1714 vm_pager_update_writecount(shmfd->shm_object, 0,
1717 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1718 shmfd->shm_object, foff, writecnt, td);
1722 vm_pager_release_writecount(shmfd->shm_object, 0,
1724 vm_object_deallocate(shmfd->shm_object);
1727 shm_rangelock_unlock(shmfd, rl_cookie);
1732 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1735 struct shmfd *shmfd;
1740 mtx_lock(&shm_timestamp_lock);
1742 * SUSv4 says that x bits of permission need not be affected.
1743 * Be consistent with our shm_open there.
1746 error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1750 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1751 VADMIN, active_cred);
1754 shmfd->shm_mode = mode & ACCESSPERMS;
1756 mtx_unlock(&shm_timestamp_lock);
1761 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1764 struct shmfd *shmfd;
1769 mtx_lock(&shm_timestamp_lock);
1771 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1775 if (uid == (uid_t)-1)
1776 uid = shmfd->shm_uid;
1777 if (gid == (gid_t)-1)
1778 gid = shmfd->shm_gid;
1779 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1780 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1781 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1783 shmfd->shm_uid = uid;
1784 shmfd->shm_gid = gid;
1786 mtx_unlock(&shm_timestamp_lock);
1791 * Helper routines to allow the backing object of a shared memory file
1792 * descriptor to be mapped in the kernel.
1795 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1797 struct shmfd *shmfd;
1798 vm_offset_t kva, ofs;
1802 if (fp->f_type != DTYPE_SHM)
1805 obj = shmfd->shm_object;
1806 VM_OBJECT_WLOCK(obj);
1808 * XXXRW: This validation is probably insufficient, and subject to
1809 * sign errors. It should be fixed.
1811 if (offset >= shmfd->shm_size ||
1812 offset + size > round_page(shmfd->shm_size)) {
1813 VM_OBJECT_WUNLOCK(obj);
1817 shmfd->shm_kmappings++;
1818 vm_object_reference_locked(obj);
1819 VM_OBJECT_WUNLOCK(obj);
1821 /* Map the object into the kernel_map and wire it. */
1822 kva = vm_map_min(kernel_map);
1823 ofs = offset & PAGE_MASK;
1824 offset = trunc_page(offset);
1825 size = round_page(size + ofs);
1826 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1827 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1828 VM_PROT_READ | VM_PROT_WRITE, 0);
1829 if (rv == KERN_SUCCESS) {
1830 rv = vm_map_wire(kernel_map, kva, kva + size,
1831 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1832 if (rv == KERN_SUCCESS) {
1833 *memp = (void *)(kva + ofs);
1836 vm_map_remove(kernel_map, kva, kva + size);
1838 vm_object_deallocate(obj);
1840 /* On failure, drop our mapping reference. */
1841 VM_OBJECT_WLOCK(obj);
1842 shmfd->shm_kmappings--;
1843 VM_OBJECT_WUNLOCK(obj);
1845 return (vm_mmap_to_errno(rv));
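
/*
 * In-kernel usage sketch for the helpers above:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, size, 0, &mem);
 *	if (error == 0) {
 *		... access size bytes at mem ...
 *		error = shm_unmap(fp, mem, size);
 *	}
 */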
1849 * We require the caller to unmap the entire entry. This allows us to
1850 * safely decrement shm_kmappings when a mapping is removed.
1853 shm_unmap(struct file *fp, void *mem, size_t size)
1855 struct shmfd *shmfd;
1856 vm_map_entry_t entry;
1857 vm_offset_t kva, ofs;
1865 if (fp->f_type != DTYPE_SHM)
1868 kva = (vm_offset_t)mem;
1869 ofs = kva & PAGE_MASK;
1870 kva = trunc_page(kva);
1871 size = round_page(size + ofs);
1873 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1874 &obj, &pindex, &prot, &wired);
1875 if (rv != KERN_SUCCESS)
1877 if (entry->start != kva || entry->end != kva + size) {
1878 vm_map_lookup_done(map, entry);
1881 vm_map_lookup_done(map, entry);
1882 if (obj != shmfd->shm_object)
1884 vm_map_remove(map, kva, kva + size);
1885 VM_OBJECT_WLOCK(obj);
1886 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1887 shmfd->shm_kmappings--;
1888 VM_OBJECT_WUNLOCK(obj);
1893 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1895 const char *path, *pr_path;
1899 sx_assert(&shm_dict_lock, SA_LOCKED);
1900 kif->kf_type = KF_TYPE_SHM;
1901 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1902 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1903 if (shmfd->shm_path != NULL) {
1904 if (shmfd->shm_path != NULL) {
1905 path = shmfd->shm_path;
1906 pr_path = curthread->td_ucred->cr_prison->pr_path;
1907 if (strcmp(pr_path, "/") != 0) {
1908 /* Return the jail-rooted pathname. */
1909 pr_pathlen = strlen(pr_path);
1910 visible = strncmp(path, pr_path, pr_pathlen)
1911 == 0 && path[pr_pathlen] == '/';
1912 if (list && !visible)
1917 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1924 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1925 struct filedesc *fdp __unused)
1929 sx_slock(&shm_dict_lock);
1930 res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1931 sx_sunlock(&shm_dict_lock);
1936 shm_add_seals(struct file *fp, int seals)
1938 struct shmfd *shmfd;
1940 vm_ooffset_t writemappings;
1945 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1947 /* Even already-set seals should result in EPERM. */
1948 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1952 nseals = seals & ~shmfd->shm_seals;
1953 if ((nseals & F_SEAL_WRITE) != 0) {
1954 if (shm_largepage(shmfd)) {
1960 * The rangelock above prevents writable mappings from being
1961 * added after we've started applying seals. The RLOCK here
1962 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1963 * writemappings will be done without a rangelock.
1965 VM_OBJECT_RLOCK(shmfd->shm_object);
1966 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1967 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1968 /* kmappings are also writable */
1969 if (writemappings > 0) {
1974 shmfd->shm_seals |= nseals;
1976 shm_rangelock_unlock(shmfd, rl_cookie);
1981 shm_get_seals(struct file *fp, int *seals)
1983 struct shmfd *shmfd;
1986 *seals = shmfd->shm_seals;
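
/*
 * Sealing usage sketch (the descriptor must have been created with
 * sealing allowed, e.g. via memfd_create(3) or SHM_ALLOW_SEALING):
 *
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	seals = fcntl(fd, F_GET_SEALS);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	fails while writable
 *						mappings exist
 */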
1991 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
1994 vm_pindex_t pistart, pi, piend;
1995 vm_ooffset_t off, len;
1996 int startofs, endofs, end;
2001 KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2002 if (off + len > shmfd->shm_size)
2003 len = shmfd->shm_size - off;
2004 object = shmfd->shm_object;
2005 startofs = off & PAGE_MASK;
2006 endofs = (off + len) & PAGE_MASK;
2007 pistart = OFF_TO_IDX(off);
2008 piend = OFF_TO_IDX(off + len);
2009 pi = OFF_TO_IDX(off + PAGE_MASK);
2012 /* Handle the case when offset is on or beyond shm size. */
2013 if ((off_t)len <= 0) {
2018 VM_OBJECT_WLOCK(object);
2020 if (startofs != 0) {
2021 end = pistart != piend ? PAGE_SIZE : endofs;
2022 error = shm_partial_page_invalidate(object, pistart, startofs,
2026 off += end - startofs;
2027 len -= end - startofs;
2031 vm_object_page_remove(object, pi, piend, 0);
2032 off += IDX_TO_OFF(piend - pi);
2033 len -= IDX_TO_OFF(piend - pi);
2036 if (endofs != 0 && pistart != piend) {
2037 error = shm_partial_page_invalidate(object, piend, 0, endofs);
2045 VM_OBJECT_WUNLOCK(shmfd->shm_object);
2052 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2053 struct ucred *active_cred, struct thread *td)
2056 struct shmfd *shmfd;
2060 KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2061 KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2062 ("shm_fspacectl: non-zero flags"));
2063 KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2064 ("shm_fspacectl: offset/length overflow or underflow"));
2070 rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
2072 case SPACECTL_DEALLOC:
2073 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2077 error = shm_deallocate(shmfd, &off, &len, flags);
2082 __assert_unreachable();
2084 shm_rangelock_unlock(shmfd, rl_cookie);
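
/*
 * Userland reaches this via fspacectl(2); a hedged sketch:
 *
 *	struct spacectl_range range = {
 *		.r_offset = off,
 *		.r_len = len,
 *	};
 *	fspacectl(fd, SPACECTL_DEALLOC, &range, 0, &range);
 *
 * which punches a hole by zeroing partial pages and freeing whole
 * pages in the range, as shm_deallocate() above implements.
 */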
2090 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2093 struct shmfd *shmfd;
2097 /* This assumes that the caller already checked for overflow. */
2100 size = offset + len;
2103 * Just grab the rangelock for the range that we may be attempting to
2104 * grow, rather than blocking read/write for regions we won't be
2105 * touching while this (potential) resize is in progress. Other
2106 * attempts to resize the shmfd will have to take a write lock from 0 to
2107 * OFF_MAX, so this being potentially beyond the current usable range of
2108 * the shmfd is not necessarily a concern. If other mechanisms are
2109 * added to grow a shmfd, this may need to be re-evaluated.
2111 rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2112 if (size > shmfd->shm_size)
2113 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2114 shm_rangelock_unlock(shmfd, rl_cookie);
2115 /* Translate to posix_fallocate(2) return value as needed. */
2116 if (error == ENOMEM)
2122 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2124 struct shm_mapping *shmm;
2126 struct kinfo_file kif;
2130 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2131 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2133 sx_slock(&shm_dict_lock);
2134 for (i = 0; i < shm_hash + 1; i++) {
2135 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2136 error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2138 if (error == EPERM) {
2145 error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2151 sx_sunlock(&shm_dict_lock);
2152 error2 = sbuf_finish(&sb);
2154 return (error != 0 ? error : error2);
2157 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2158 CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2159 NULL, 0, sysctl_posix_shm_list, "",
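
/*
 * Consumed by posixshmcontrol(1).  A userland read sketch (error and
 * buffer handling elided):
 *
 *	size_t len = 0;
 *	sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0);
 *
 * The buffer then holds packed struct kinfo_file records, one per
 * named POSIX shm object visible to the caller's jail.
 */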
2163 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2164 struct filecaps *caps)
2167 return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2171 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2172 * caller, and libc will enforce it for the traditional shm_open() call. This
2173 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This
2174 * interface also includes a 'name' argument that is currently unused, but could
2175 * potentially be exported later via some interface for debugging purposes.
2176 * From the kernel's perspective, it is optional. Individual consumers like
2177 * memfd_create() may require it in order to be compatible with other systems
2178 * implementing the same function.
2181 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2184 return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2185 uap->shmflags, NULL, uap->name));
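
/*
 * For example (a sketch of the intent described above), libc's
 * memfd_create(3) can be implemented on top of this interface by
 * calling shm_open2(SHM_ANON, O_RDWR | oflags, 0, shmflags, name),
 * passing SHM_ALLOW_SEALING in shmflags when MFD_ALLOW_SEALING was
 * requested and forwarding the user-supplied name for debugging.
 */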