2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5 * Copyright 2020 The FreeBSD Foundation
8 * Portions of this software were developed by BAE Systems, the University of
9 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11 * Computing (TC) research program.
13 * Portions of this software were developed by Konstantin Belousov
14 * under sponsorship from the FreeBSD Foundation.
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */

38 /*
39 * Support for shared swap-backed anonymous memory objects via
40 * shm_open(2), shm_rename(2), and shm_unlink(2).
41 * While most of the implementation is here, vm_mmap.c contains
42 * mapping logic changes.
44 * posixshmcontrol(1) allows users to inspect the state of the memory
45 * objects. The per-uid swap resource limit controls the total amount of
46 * memory that a user can consume for anonymous objects, including
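 */

/*
 * As a brief illustration (a userland sketch, not part of this file; error
 * handling omitted), the facility implemented here is typically consumed as:
 *
 *	int fd;
 *	void *p;
 *
 *	fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	...
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/example");
 */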
50 #include <sys/cdefs.h>
51 #include "opt_capsicum.h"
52 #include "opt_ktrace.h"
54 #include <sys/param.h>
55 #include <sys/capsicum.h>
57 #include <sys/fcntl.h>
59 #include <sys/filedesc.h>
60 #include <sys/filio.h>
61 #include <sys/fnv_hash.h>
62 #include <sys/kernel.h>
63 #include <sys/limits.h>
65 #include <sys/signal.h>
67 #include <sys/ktrace.h>
69 #include <sys/malloc.h>
71 #include <sys/mutex.h>
74 #include <sys/refcount.h>
75 #include <sys/resourcevar.h>
76 #include <sys/rwlock.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysctl.h>
81 #include <sys/sysproto.h>
82 #include <sys/systm.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
90 #include <security/audit/audit.h>
91 #include <security/mac/mac_framework.h>
94 #include <vm/vm_param.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pageout.h>
102 #include <vm/vm_pager.h>
103 #include <vm/swap_pager.h>
108 struct shmfd *sm_shmfd;
109 LIST_ENTRY(shm_mapping) sm_link;
112 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
113 static LIST_HEAD(, shm_mapping) *shm_dictionary;
114 static struct sx shm_dict_lock;
115 static struct mtx shm_timestamp_lock;
116 static u_long shm_hash;
117 static struct unrhdr64 shm_ino_unr;
118 static dev_t shm_dev_ino;
120 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash])
122 static void shm_init(void *arg);
123 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
124 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
125 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
126 static void shm_doremove(struct shm_mapping *map);
127 static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
129 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
131 static int shm_copyin_path(struct thread *td, const char *userpath_in,
134 static fo_rdwr_t shm_read;
135 static fo_rdwr_t shm_write;
136 static fo_truncate_t shm_truncate;
137 static fo_ioctl_t shm_ioctl;
138 static fo_stat_t shm_stat;
139 static fo_close_t shm_close;
140 static fo_chmod_t shm_chmod;
141 static fo_chown_t shm_chown;
142 static fo_seek_t shm_seek;
143 static fo_fill_kinfo_t shm_fill_kinfo;
144 static fo_mmap_t shm_mmap;
145 static fo_get_seals_t shm_get_seals;
146 static fo_add_seals_t shm_add_seals;
147 static fo_fallocate_t shm_fallocate;
149 /* File descriptor operations. */
150 struct fileops shm_ops = {
152 .fo_write = shm_write,
153 .fo_truncate = shm_truncate,
154 .fo_ioctl = shm_ioctl,
155 .fo_poll = invfo_poll,
156 .fo_kqfilter = invfo_kqfilter,
158 .fo_close = shm_close,
159 .fo_chmod = shm_chmod,
160 .fo_chown = shm_chown,
161 .fo_sendfile = vn_sendfile,
163 .fo_fill_kinfo = shm_fill_kinfo,
165 .fo_get_seals = shm_get_seals,
166 .fo_add_seals = shm_add_seals,
167 .fo_fallocate = shm_fallocate,
168 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
171 FEATURE(posix_shm, "POSIX shared memory");
173 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
176 static int largepage_reclaim_tries = 1;
177 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
178 CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
179 "Number of contig reclaims before giving up for default alloc policy");
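/*
 * Runtime tuning example for the knob above (the value is illustrative):
 *
 *	sysctl vm.largepages.reclaim_tries=4
 */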
181 #define shm_rangelock_unlock(shmfd, cookie) \
182 rangelock_unlock(&(shmfd)->shm_rl, (cookie), &(shmfd)->shm_mtx)
183 #define shm_rangelock_rlock(shmfd, start, end) \
184 rangelock_rlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
185 #define shm_rangelock_tryrlock(shmfd, start, end) \
186 rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
187 #define shm_rangelock_wlock(shmfd, start, end) \
188 rangelock_wlock(&(shmfd)->shm_rl, (start), (end), &(shmfd)->shm_mtx)
191 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
196 int error, offset, rv;
198 idx = OFF_TO_IDX(uio->uio_offset);
199 offset = uio->uio_offset & PAGE_MASK;
200 tlen = MIN(PAGE_SIZE - offset, len);
202 rv = vm_page_grab_valid_unlocked(&m, obj, idx,
203 VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
204 if (rv == VM_PAGER_OK)
208 * Read I/O without either a corresponding resident page or swap
209 * page: use zero_region. This is intended to avoid instantiating
210 * pages on read from a sparse region.
212 VM_OBJECT_WLOCK(obj);
213 m = vm_page_lookup(obj, idx);
214 if (uio->uio_rw == UIO_READ && m == NULL &&
215 !vm_pager_has_page(obj, idx, NULL, NULL)) {
216 VM_OBJECT_WUNLOCK(obj);
217 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
221 * Although the tmpfs vnode lock is held here, it is
222 * nonetheless safe to sleep waiting for a free page. The
223 * pageout daemon does not need to acquire the tmpfs vnode
224 * lock to page out tobj's pages because tobj is an OBJT_SWAP
227 rv = vm_page_grab_valid(&m, obj, idx,
228 VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
229 if (rv != VM_PAGER_OK) {
230 VM_OBJECT_WUNLOCK(obj);
232 printf("uiomove_object: vm_obj %p idx %jd "
233 "pager error %d\n", obj, idx, rv);
235 return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
237 VM_OBJECT_WUNLOCK(obj);
240 error = uiomove_fromphys(&m, offset, tlen, uio);
241 if (uio->uio_rw == UIO_WRITE && error == 0)
242 vm_page_set_dirty(m);
250 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
257 while ((resid = uio->uio_resid) > 0) {
258 if (obj_size <= uio->uio_offset)
260 len = MIN(obj_size - uio->uio_offset, resid);
263 error = uiomove_object_page(obj, len, uio);
264 if (error != 0 || resid == uio->uio_resid)
270 static u_long count_largepages[MAXPAGESIZES];
273 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
274 int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
276 vm_page_t m __diagused;
279 psind = object->un_pager.phys.data_val;
280 if (psind == 0 || pidx >= object->size)
281 return (VM_PAGER_FAIL);
282 *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
285 * We only busy the first page in the superpage run. It is
286 * useless to busy the whole run since we only remove full
287 * superpages, and it takes too long to busy e.g. 512 * 512 ==
288 * 262144 pages constituting a 1G amd64 superpage.
290 m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
293 *last = *first + atop(pagesizes[psind]) - 1;
294 return (VM_PAGER_OK);
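/*
 * Phys pager haspage hook: a page is reported present iff it falls within
 * an already-allocated largepage run; *before and *after describe how far
 * that run extends around the requested index.
 */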
298 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
299 int *before, int *after)
303 psind = object->un_pager.phys.data_val;
304 if (psind == 0 || pindex >= object->size)
306 if (before != NULL) {
307 *before = pindex - rounddown2(pindex, pagesizes[psind] /
311 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
318 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
319 vm_ooffset_t foff, struct ucred *cred)
324 shm_largepage_phys_dtor(vm_object_t object)
328 psind = object->un_pager.phys.data_val;
330 atomic_subtract_long(&count_largepages[psind],
331 object->size / (pagesizes[psind] / PAGE_SIZE));
332 vm_wire_sub(object->size);
334 KASSERT(object->size == 0,
335 ("largepage phys obj %p not initialized but size %#jx > 0",
336 object, (uintmax_t)object->size));
340 static const struct phys_pager_ops shm_largepage_phys_ops = {
341 .phys_pg_populate = shm_largepage_phys_populate,
342 .phys_pg_haspage = shm_largepage_phys_haspage,
343 .phys_pg_ctor = shm_largepage_phys_ctor,
344 .phys_pg_dtor = shm_largepage_phys_dtor,
348 shm_largepage(struct shmfd *shmfd)
350 return (shmfd->shm_object->type == OBJT_PHYS);
354 shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
359 swap_pager_freespace(obj, start, size, &c);
363 shm = obj->un_pager.swp.swp_priv;
366 KASSERT(shm->shm_pages >= c,
367 ("shm %p pages %jd free %jd", shm,
368 (uintmax_t)shm->shm_pages, (uintmax_t)c));
373 shm_page_inserted(vm_object_t obj, vm_page_t m)
377 shm = obj->un_pager.swp.swp_priv;
380 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
385 shm_page_removed(vm_object_t obj, vm_page_t m)
389 shm = obj->un_pager.swp.swp_priv;
392 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
393 KASSERT(shm->shm_pages >= 1,
394 ("shm %p pages %jd free 1", shm,
395 (uintmax_t)shm->shm_pages));
400 static struct pagerops shm_swap_pager_ops = {
401 .pgo_kvme_type = KVME_TYPE_SWAP,
402 .pgo_freespace = shm_pager_freespace,
403 .pgo_page_inserted = shm_page_inserted,
404 .pgo_page_removed = shm_page_removed,
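/* Pager type for shmfd swap objects, allocated dynamically in shm_init(). */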
406 static int shmfd_pager_type = -1;
409 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
416 foffset = foffset_lock(fp, 0);
421 (offset > 0 && foffset > OFF_MAX - offset)) {
428 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
432 offset += shmfd->shm_size;
440 if (offset < 0 || offset > shmfd->shm_size)
443 td->td_uretoff.tdu_off = offset;
445 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
450 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
451 int flags, struct thread *td)
459 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
463 foffset_lock_uio(fp, uio, flags);
464 rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
465 uio->uio_offset + uio->uio_resid);
466 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
467 shm_rangelock_unlock(shmfd, rl_cookie);
468 foffset_unlock_uio(fp, uio, flags);
473 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
474 int flags, struct thread *td)
483 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
487 if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
489 foffset_lock_uio(fp, uio, flags);
490 if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
492 * Overflow is only an error if we're supposed to expand on
493 * write. Otherwise, we'll just truncate the write to the
494 * size of the file, which can only grow up to OFF_MAX.
496 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
497 foffset_unlock_uio(fp, uio, flags);
501 size = shmfd->shm_size;
503 size = uio->uio_offset + uio->uio_resid;
505 if ((flags & FOF_OFFSET) == 0)
506 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
508 rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
509 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
513 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
514 size > shmfd->shm_size) {
515 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
518 error = uiomove_object(shmfd->shm_object,
519 shmfd->shm_size, uio);
521 shm_rangelock_unlock(shmfd, rl_cookie);
522 foffset_unlock_uio(fp, uio, flags);
527 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
537 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
541 return (shm_dotruncate(shmfd, length));
545 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
549 struct shm_largepage_conf *conf;
557 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
558 * just like it would on an unlinked regular file
562 if (!shm_largepage(shmfd))
565 if (shmfd->shm_lp_psind != 0 &&
566 conf->psind != shmfd->shm_lp_psind)
568 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
569 pagesizes[conf->psind] == 0)
571 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
572 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
573 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
576 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
577 shmfd->shm_lp_psind = conf->psind;
578 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
579 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
580 shm_rangelock_unlock(shmfd, rl_cookie);
583 if (!shm_largepage(shmfd))
586 rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
587 conf->psind = shmfd->shm_lp_psind;
588 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
589 shm_rangelock_unlock(shmfd, rl_cookie);
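/*
 * Illustrative userland sequence for the largepage configuration handled
 * above (a sketch; the FIOSSHMLPGCNF/FIOGSHMLPGCNF ioctl names and the
 * shm_create_largepage(3) wrapper are assumptions, not defined in this
 * file):
 *
 *	struct shm_largepage_conf conf;
 *	int fd;
 *
 *	fd = shm_open2(SHM_ANON, O_RDWR, 0600, SHM_LARGEPAGE, NULL);
 *	memset(&conf, 0, sizeof(conf));
 *	conf.psind = 1;				(second entry of pagesizes[])
 *	conf.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);	(set)
 *	ioctl(fd, FIOGSHMLPGCNF, &conf);	(query)
 */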
597 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
608 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
614 * Attempt to return sane-ish values for fstat() on a memory file
617 bzero(sb, sizeof(*sb));
618 sb->st_blksize = PAGE_SIZE;
619 sb->st_size = shmfd->shm_size;
620 mtx_lock(&shm_timestamp_lock);
621 sb->st_atim = shmfd->shm_atime;
622 sb->st_ctim = shmfd->shm_ctime;
623 sb->st_mtim = shmfd->shm_mtime;
624 sb->st_birthtim = shmfd->shm_birthtime;
625 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */
626 sb->st_uid = shmfd->shm_uid;
627 sb->st_gid = shmfd->shm_gid;
628 mtx_unlock(&shm_timestamp_lock);
629 sb->st_dev = shm_dev_ino;
630 sb->st_ino = shmfd->shm_ino;
631 sb->st_nlink = shmfd->shm_object->ref_count;
632 if (shm_largepage(shmfd)) {
633 sb->st_blocks = shmfd->shm_object->size /
634 (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
636 sb->st_blocks = shmfd->shm_pages;
643 shm_close(struct file *fp, struct thread *td)
655 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
661 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
662 pr_path = td->td_ucred->cr_prison->pr_path;
664 /* Construct a full pathname for jailed callers. */
665 pr_pathlen = strcmp(pr_path, "/") ==
666 0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
667 error = copyinstr(userpath_in, path + pr_pathlen,
668 MAXPATHLEN - pr_pathlen, NULL);
673 if (KTRPOINT(curthread, KTR_NAMEI))
677 /* Require paths to start with a '/' character. */
678 if (path[pr_pathlen] != '/') {
693 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
697 vm_pindex_t idx, nobjsize;
701 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
702 object = shmfd->shm_object;
703 VM_OBJECT_ASSERT_WLOCKED(object);
704 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
705 if (length == shmfd->shm_size)
707 nobjsize = OFF_TO_IDX(length + PAGE_MASK);
709 /* Are we shrinking? If so, trim the end. */
710 if (length < shmfd->shm_size) {
711 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
715 * Disallow any requests to shrink the size if this
716 * object is mapped into the kernel.
718 if (shmfd->shm_kmappings > 0)
722 * Zero the truncated part of the last page.
724 base = length & PAGE_MASK;
726 idx = OFF_TO_IDX(length);
728 m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
730 MPASS(vm_page_all_valid(m));
731 } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
732 m = vm_page_alloc(object, idx,
733 VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
736 vm_object_pip_add(object, 1);
737 VM_OBJECT_WUNLOCK(object);
738 rv = vm_pager_get_pages(object, &m, 1, NULL,
740 VM_OBJECT_WLOCK(object);
741 vm_object_pip_wakeup(object);
742 if (rv == VM_PAGER_OK) {
744 * Since the page was not resident,
745 * and therefore not recently
746 * accessed, immediately enqueue it
747 * for asynchronous laundering. The
748 * current operation is not regarded
754 VM_OBJECT_WUNLOCK(object);
759 pmap_zero_page_area(m, base, PAGE_SIZE - base);
760 KASSERT(vm_page_all_valid(m),
761 ("shm_dotruncate: page %p is invalid", m));
762 vm_page_set_dirty(m);
766 delta = IDX_TO_OFF(object->size - nobjsize);
768 if (nobjsize < object->size)
769 vm_object_page_remove(object, nobjsize, object->size,
772 /* Free the swap accounted for shm */
773 swap_release_by_cred(delta, object->cred);
774 object->charge -= delta;
776 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
779 /* Try to reserve additional swap space. */
780 delta = IDX_TO_OFF(nobjsize - object->size);
781 if (!swap_reserve_by_cred(delta, object->cred))
783 object->charge += delta;
785 shmfd->shm_size = length;
786 mtx_lock(&shm_timestamp_lock);
787 vfs_timestamp(&shmfd->shm_ctime);
788 shmfd->shm_mtime = shmfd->shm_ctime;
789 mtx_unlock(&shm_timestamp_lock);
790 object->size = nobjsize;
795 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
799 vm_pindex_t newobjsz;
800 vm_pindex_t oldobjsz __unused;
801 int aflags, error, i, psind, try;
803 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
804 object = shmfd->shm_object;
805 VM_OBJECT_ASSERT_WLOCKED(object);
806 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
808 oldobjsz = object->size;
809 newobjsz = OFF_TO_IDX(length);
810 if (length == shmfd->shm_size)
812 psind = shmfd->shm_lp_psind;
813 if (psind == 0 && length != 0)
815 if ((length & (pagesizes[psind] - 1)) != 0)
818 if (length < shmfd->shm_size) {
819 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
821 if (shmfd->shm_kmappings > 0)
823 return (ENOTSUP); /* Pages are unmanaged. */
825 vm_object_page_remove(object, newobjsz, oldobjsz, 0);
826 object->size = newobjsz;
827 shmfd->shm_size = length;
832 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
835 aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
836 if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
837 aflags |= VM_ALLOC_WAITFAIL;
841 * Extend shmfd and object, keeping all already fully
842 * allocated large pages intact even on error, because the dropped
843 * object lock might have allowed them to be mapped.
845 while (object->size < newobjsz) {
846 m = vm_page_alloc_contig(object, object->size, aflags,
847 pagesizes[psind] / PAGE_SIZE, 0, ~0,
851 VM_OBJECT_WUNLOCK(object);
852 if (shmfd->shm_lp_alloc_policy ==
853 SHM_LARGEPAGE_ALLOC_NOWAIT ||
854 (shmfd->shm_lp_alloc_policy ==
855 SHM_LARGEPAGE_ALLOC_DEFAULT &&
856 try >= largepage_reclaim_tries)) {
857 VM_OBJECT_WLOCK(object);
860 error = vm_page_reclaim_contig(aflags,
861 pagesizes[psind] / PAGE_SIZE, 0, ~0,
862 pagesizes[psind], 0) ? 0 :
863 vm_wait_intr(object);
865 VM_OBJECT_WLOCK(object);
869 VM_OBJECT_WLOCK(object);
873 for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
874 if ((m[i].flags & PG_ZERO) == 0)
875 pmap_zero_page(&m[i]);
876 vm_page_valid(&m[i]);
877 vm_page_xunbusy(&m[i]);
879 object->size += OFF_TO_IDX(pagesizes[psind]);
880 shmfd->shm_size += pagesizes[psind];
881 atomic_add_long(&count_largepages[psind], 1);
882 vm_wire_add(atop(pagesizes[psind]));
888 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
892 VM_OBJECT_WLOCK(shmfd->shm_object);
893 error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
894 length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
896 VM_OBJECT_WUNLOCK(shmfd->shm_object);
901 shm_dotruncate(struct shmfd *shmfd, off_t length)
906 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
907 error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
908 shm_rangelock_unlock(shmfd, rl_cookie);
913 * shmfd object management including creation and reference counting
917 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
922 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
924 shmfd->shm_uid = ucred->cr_uid;
925 shmfd->shm_gid = ucred->cr_gid;
926 shmfd->shm_mode = mode;
928 shmfd->shm_object = phys_pager_allocate(NULL,
929 &shm_largepage_phys_ops, NULL, shmfd->shm_size,
930 VM_PROT_DEFAULT, 0, ucred);
931 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
933 obj = vm_pager_allocate(shmfd_pager_type, NULL,
934 shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
935 VM_OBJECT_WLOCK(obj);
936 obj->un_pager.swp.swp_priv = shmfd;
937 VM_OBJECT_WUNLOCK(obj);
938 shmfd->shm_object = obj;
940 KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
941 vfs_timestamp(&shmfd->shm_birthtime);
942 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
943 shmfd->shm_birthtime;
944 shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
945 refcount_init(&shmfd->shm_refs, 1);
946 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
947 rangelock_init(&shmfd->shm_rl);
949 mac_posixshm_init(shmfd);
950 mac_posixshm_create(ucred, shmfd);
957 shm_hold(struct shmfd *shmfd)
960 refcount_acquire(&shmfd->shm_refs);
965 shm_drop(struct shmfd *shmfd)
969 if (refcount_release(&shmfd->shm_refs)) {
971 mac_posixshm_destroy(shmfd);
973 rangelock_destroy(&shmfd->shm_rl);
974 mtx_destroy(&shmfd->shm_mtx);
975 obj = shmfd->shm_object;
976 if (!shm_largepage(shmfd)) {
977 VM_OBJECT_WLOCK(obj);
978 obj->un_pager.swp.swp_priv = NULL;
979 VM_OBJECT_WUNLOCK(obj);
981 vm_object_deallocate(obj);
982 free(shmfd, M_SHMFD);
987 * Determine if the credentials have sufficient permissions for a
988 * specified combination of FREAD and FWRITE.
991 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
1001 mtx_lock(&shm_timestamp_lock);
1002 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1004 mtx_unlock(&shm_timestamp_lock);
1014 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
1015 sx_init(&shm_dict_lock, "shm dictionary");
1016 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
1017 new_unrhdr64(&shm_ino_unr, 1);
1018 shm_dev_ino = devfs_alloc_cdp_inode();
1019 KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
1020 shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
1022 MPASS(shmfd_pager_type != -1);
1024 for (i = 1; i < MAXPAGESIZES; i++) {
1025 if (pagesizes[i] == 0)
1027 #define M (1024 * 1024)
1028 #define G (1024 * M)
1029 if (pagesizes[i] >= G)
1030 snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
1031 else if (pagesizes[i] >= M)
1032 snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
1034 snprintf(name, sizeof(name), "%lu", pagesizes[i]);
1037 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
1038 OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
1039 "number of non-transient largepages allocated");
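/*
 * On a typical amd64 machine this creates counters such as
 * vm.largepages.2M and vm.largepages.1G (the names depend on pagesizes[]).
 */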
1042 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
1045 * Remove all shared memory objects that belong to a prison.
1048 shm_remove_prison(struct prison *pr)
1050 struct shm_mapping *shmm, *tshmm;
1053 sx_xlock(&shm_dict_lock);
1054 for (i = 0; i < shm_hash + 1; i++) {
1055 LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1056 if (shmm->sm_shmfd->shm_object->cred &&
1057 shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1061 sx_xunlock(&shm_dict_lock);
1065 * Dictionary management. We maintain an in-kernel dictionary to map
1066 * paths to shmfd objects. We use the FNV hash on the path to store
1067 * the mappings in a hash table.
1069 static struct shmfd *
1070 shm_lookup(char *path, Fnv32_t fnv)
1072 struct shm_mapping *map;
1074 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1075 if (map->sm_fnv != fnv)
1077 if (strcmp(map->sm_path, path) == 0)
1078 return (map->sm_shmfd);
1085 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1087 struct shm_mapping *map;
1089 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1090 map->sm_path = path;
1092 map->sm_shmfd = shm_hold(shmfd);
1093 shmfd->shm_path = path;
1094 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1098 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1100 struct shm_mapping *map;
1103 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1104 if (map->sm_fnv != fnv)
1106 if (strcmp(map->sm_path, path) == 0) {
1108 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1112 error = shm_access(map->sm_shmfd, ucred,
1125 shm_doremove(struct shm_mapping *map)
1127 map->sm_shmfd->shm_path = NULL;
1128 LIST_REMOVE(map, sm_link);
1129 shm_drop(map->sm_shmfd);
1130 free(map->sm_path, M_SHMFD);
1135 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1136 int shmflags, struct filecaps *fcaps, const char *name __unused)
1138 struct pwddesc *pdp;
1139 struct shmfd *shmfd;
1145 int error, fd, initial_seals;
1148 if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1149 SHM_LARGEPAGE)) != 0)
1152 initial_seals = F_SEAL_SEAL;
1153 if ((shmflags & SHM_ALLOW_SEALING) != 0)
1154 initial_seals &= ~F_SEAL_SEAL;
1156 #ifdef CAPABILITY_MODE
1158 * shm_open(2) is only allowed for anonymous objects.
1160 if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
1164 AUDIT_ARG_FFLAGS(flags);
1165 AUDIT_ARG_MODE(mode);
1167 if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1170 if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1173 largepage = (shmflags & SHM_LARGEPAGE) != 0;
1174 if (largepage && !PMAP_HAS_LARGEPAGES)
1178 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1179 * If the decision is made later to allow additional seals, care must be
1180 * taken below to ensure that the seals are properly set if the shmfd
1181 * already existed -- this currently assumes that only F_SEAL_SEAL can
1182 * be set and doesn't take further precautions to ensure the validity of
1183 * the seals being added with respect to current mappings.
1185 if ((initial_seals & ~F_SEAL_SEAL) != 0)
1188 pdp = td->td_proc->p_pd;
1189 cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1192 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
1193 * by POSIX. We allow it to be unset here so that an in-kernel
1194 * interface may be written as a thin layer around shm, optionally not
1195 * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally
1196 * in sys_shm_open() to keep this implementation compliant.
1198 error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1202 /* A SHM_ANON path pointer creates an anonymous object. */
1203 if (userpath == SHM_ANON) {
1204 /* A read-only anonymous object is pointless. */
1205 if ((flags & O_ACCMODE) == O_RDONLY) {
1206 fdclose(td, fp, fd);
1210 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1211 shmfd->shm_seals = initial_seals;
1212 shmfd->shm_flags = shmflags;
1214 error = shm_copyin_path(td, userpath, &path);
1216 fdclose(td, fp, fd);
1221 AUDIT_ARG_UPATH1_CANON(path);
1222 fnv = fnv_32_str(path, FNV1_32_INIT);
1223 sx_xlock(&shm_dict_lock);
1224 shmfd = shm_lookup(path, fnv);
1225 if (shmfd == NULL) {
1226 /* Object does not yet exist, create it if requested. */
1227 if (flags & O_CREAT) {
1229 error = mac_posixshm_check_create(td->td_ucred,
1233 shmfd = shm_alloc(td->td_ucred, cmode,
1235 shmfd->shm_seals = initial_seals;
1236 shmfd->shm_flags = shmflags;
1237 shm_insert(path, fnv, shmfd);
1242 free(path, M_SHMFD);
1246 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1249 * kern_shm_open() likely shouldn't ever error out on
1250 * trying to set a seal that already exists, unlike
1251 * F_ADD_SEALS. This would break terribly as
1252 * shm_open(2) actually sets F_SEAL_SEAL to maintain
1253 * historical behavior where the underlying file could
1256 initial_seals &= ~shmfd->shm_seals;
1259 * Object already exists, obtain a new
1260 * reference if requested and permitted.
1262 free(path, M_SHMFD);
1265 * initial_seals can't set additional seals if F_SEAL_SEAL
1266 * has already been set. If F_SEAL_SEAL is set,
1267 * then we've already removed that one from
1268 * initial_seals. This is currently redundant as we
1269 * only allow setting F_SEAL_SEAL at creation time, but
1270 * it's cheap to check and decreases the effort required
1271 * to allow additional seals.
1273 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1276 else if ((flags & (O_CREAT | O_EXCL)) ==
1279 else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1283 error = mac_posixshm_check_open(td->td_ucred,
1284 shmfd, FFLAGS(flags & O_ACCMODE));
1287 error = shm_access(shmfd, td->td_ucred,
1288 FFLAGS(flags & O_ACCMODE));
1292 * Truncate the file back to zero length if
1293 * O_TRUNC was specified and the object was
1294 * opened with read/write.
1297 (flags & (O_ACCMODE | O_TRUNC)) ==
1298 (O_RDWR | O_TRUNC)) {
1299 VM_OBJECT_WLOCK(shmfd->shm_object);
1301 error = mac_posixshm_check_truncate(
1302 td->td_ucred, fp->f_cred, shmfd);
1305 error = shm_dotruncate_locked(shmfd, 0,
1307 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1311 * Currently we only allow F_SEAL_SEAL to be
1312 * set initially. As noted above, this would
1313 * need to be reworked should that change.
1315 shmfd->shm_seals |= initial_seals;
1318 shm_rangelock_unlock(shmfd, rl_cookie);
1320 sx_xunlock(&shm_dict_lock);
1323 fdclose(td, fp, fd);
1329 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1331 td->td_retval[0] = fd;
1338 #ifdef COMPAT_FREEBSD12
1340 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1343 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1349 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1355 error = shm_copyin_path(td, uap->path, &path);
1359 AUDIT_ARG_UPATH1_CANON(path);
1360 fnv = fnv_32_str(path, FNV1_32_INIT);
1361 sx_xlock(&shm_dict_lock);
1362 error = shm_remove(path, fnv, td->td_ucred);
1363 sx_xunlock(&shm_dict_lock);
1364 free(path, M_SHMFD);
1370 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1372 char *path_from = NULL, *path_to = NULL;
1373 Fnv32_t fnv_from, fnv_to;
1374 struct shmfd *fd_from;
1375 struct shmfd *fd_to;
1380 AUDIT_ARG_FFLAGS(flags);
1383 * Make sure the user passed only valid flags.
1384 * If you add a new flag, please add a new term here.
1387 SHM_RENAME_NOREPLACE |
1395 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1396 * force the user to choose one or the other.
1398 if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1399 (flags & SHM_RENAME_EXCHANGE) != 0) {
1404 /* Renaming to or from anonymous makes no sense */
1405 if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1410 error = shm_copyin_path(td, uap->path_from, &path_from);
1414 error = shm_copyin_path(td, uap->path_to, &path_to);
1418 AUDIT_ARG_UPATH1_CANON(path_from);
1419 AUDIT_ARG_UPATH2_CANON(path_to);
1421 /* Rename with from/to equal is a no-op */
1422 if (strcmp(path_from, path_to) == 0)
1425 fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1426 fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1428 sx_xlock(&shm_dict_lock);
1430 fd_from = shm_lookup(path_from, fnv_from);
1431 if (fd_from == NULL) {
1436 fd_to = shm_lookup(path_to, fnv_to);
1437 if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1443 * Unconditionally prevents shm_remove from invalidating the 'from'
1447 error = shm_remove(path_from, fnv_from, td->td_ucred);
1450 * One of my assumptions failed if ENOENT (e.g. locking didn't
1453 KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1461 * If we are exchanging, we need to ensure the shm_remove below
1462 * doesn't invalidate the dest shm's state.
1464 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1468 * NOTE: if path_to is not already in the hash, c'est la vie;
1469 * it simply means we have nothing already at path_to to unlink.
1470 * That is the ENOENT case.
1472 * If we somehow don't have access to unlink this guy, but
1473 * did for the shm at path_from, then relink the shm to path_from
1474 * and abort with EACCES.
1476 * All other errors: that is weird; let's relink and abort the
1479 error = shm_remove(path_to, fnv_to, td->td_ucred);
1480 if (error != 0 && error != ENOENT) {
1481 shm_insert(path_from, fnv_from, fd_from);
1483 /* Don't free path_from now, since the hash references it */
1490 shm_insert(path_to, fnv_to, fd_from);
1492 /* Don't free path_to now, since the hash references it */
1495 /* We kept a ref when we removed, and incremented again in insert */
1497 KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1498 fd_from->shm_refs));
1500 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1501 shm_insert(path_from, fnv_from, fd_to);
1504 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1509 sx_xunlock(&shm_dict_lock);
1512 free(path_from, M_SHMFD);
1513 free(path_to, M_SHMFD);
1518 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1519 vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1520 vm_ooffset_t foff, struct thread *td)
1522 struct vmspace *vms;
1523 vm_map_entry_t next_entry, prev_entry;
1524 vm_offset_t align, mask, maxaddr;
1525 int docow, error, rv, try;
1528 if (shmfd->shm_lp_psind == 0)
1531 /* MAP_PRIVATE is disabled */
1532 if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1537 MAP_ALIGNMENT_MASK)) != 0)
1540 vms = td->td_proc->p_vmspace;
1541 curmap = map == &vms->vm_map;
1543 error = kern_mmap_racct_check(td, map, size);
1548 docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1549 docow |= MAP_INHERIT_SHARE;
1550 if ((flags & MAP_NOCORE) != 0)
1551 docow |= MAP_DISABLE_COREDUMP;
1553 mask = pagesizes[shmfd->shm_lp_psind] - 1;
1554 if ((foff & mask) != 0)
1556 maxaddr = vm_map_max(map);
1558 if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1559 maxaddr = MAP_32BIT_MAX_ADDR;
1561 if (size == 0 || (size & mask) != 0 ||
1562 (*addr != 0 && ((*addr & mask) != 0 ||
1563 *addr + size < *addr || *addr + size > maxaddr)))
1566 align = flags & MAP_ALIGNMENT_MASK;
1568 align = pagesizes[shmfd->shm_lp_psind];
1569 } else if (align == MAP_ALIGNED_SUPER) {
1570 if (shmfd->shm_lp_psind != 1)
1572 align = pagesizes[1];
1574 align >>= MAP_ALIGNMENT_SHIFT;
1575 align = 1ULL << align;
1576 /* Also handles overflow. */
1577 if (align < pagesizes[shmfd->shm_lp_psind])
1582 if ((flags & MAP_FIXED) == 0) {
1584 if (curmap && (*addr == 0 ||
1585 (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1586 *addr < round_page((vm_offset_t)vms->vm_daddr +
1587 lim_max(td, RLIMIT_DATA))))) {
1588 *addr = roundup2((vm_offset_t)vms->vm_daddr +
1589 lim_max(td, RLIMIT_DATA),
1590 pagesizes[shmfd->shm_lp_psind]);
1593 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1594 if (rv != KERN_SUCCESS) {
1597 *addr = vm_map_min(map);
1598 if ((*addr & mask) != 0)
1599 *addr = (*addr + mask) & ~mask;
1604 } else if ((flags & MAP_EXCL) == 0) {
1605 rv = vm_map_delete(map, *addr, *addr + size);
1606 if (rv != KERN_SUCCESS)
1610 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1612 next_entry = vm_map_entry_succ(prev_entry);
1613 if (next_entry->start < *addr + size)
1617 rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1618 prot, max_prot, docow);
1620 error = vm_mmap_to_errno(rv);
1627 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1628 vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1629 vm_ooffset_t foff, struct thread *td)
1631 struct shmfd *shmfd;
1638 maxprot = VM_PROT_NONE;
1640 rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1641 /* FREAD should always be set. */
1642 if ((fp->f_flag & FREAD) != 0)
1643 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1646 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
1647 * mapping with a write seal applied. Private mappings are always
1650 if ((flags & MAP_SHARED) == 0) {
1651 cap_maxprot |= VM_PROT_WRITE;
1652 maxprot |= VM_PROT_WRITE;
1655 if ((fp->f_flag & FWRITE) != 0 &&
1656 (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1657 maxprot |= VM_PROT_WRITE;
1660 * Any mappings from a writable descriptor may be upgraded to
1661 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1662 * applied between the open and subsequent mmap(2). We want to
1663 * reject application of a write seal as long as any such
1664 * mapping exists so that the seal cannot be trivially bypassed.
1666 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1667 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1672 maxprot &= cap_maxprot;
1674 /* See comment in vn_mmap(). */
1677 objsize > OFF_MAX ||
1679 foff > OFF_MAX - objsize) {
1685 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1690 mtx_lock(&shm_timestamp_lock);
1691 vfs_timestamp(&shmfd->shm_atime);
1692 mtx_unlock(&shm_timestamp_lock);
1693 vm_object_reference(shmfd->shm_object);
1695 if (shm_largepage(shmfd)) {
1697 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1698 maxprot, flags, foff, td);
1701 vm_pager_update_writecount(shmfd->shm_object, 0,
1704 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1705 shmfd->shm_object, foff, writecnt, td);
1709 vm_pager_release_writecount(shmfd->shm_object, 0,
1711 vm_object_deallocate(shmfd->shm_object);
1714 shm_rangelock_unlock(shmfd, rl_cookie);
1719 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1722 struct shmfd *shmfd;
1727 mtx_lock(&shm_timestamp_lock);
1729 * SUSv4 says that x bits of permission need not be affected.
1730 * Be consistent with our shm_open there.
1733 error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1737 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1738 VADMIN, active_cred);
1741 shmfd->shm_mode = mode & ACCESSPERMS;
1743 mtx_unlock(&shm_timestamp_lock);
1748 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1751 struct shmfd *shmfd;
1756 mtx_lock(&shm_timestamp_lock);
1758 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1762 if (uid == (uid_t)-1)
1763 uid = shmfd->shm_uid;
1764 if (gid == (gid_t)-1)
1765 gid = shmfd->shm_gid;
1766 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1767 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1768 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1770 shmfd->shm_uid = uid;
1771 shmfd->shm_gid = gid;
1773 mtx_unlock(&shm_timestamp_lock);
1778 * Helper routines to allow the backing object of a shared memory file
1779 * descriptor to be mapped in the kernel.
1782 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1784 struct shmfd *shmfd;
1785 vm_offset_t kva, ofs;
1789 if (fp->f_type != DTYPE_SHM)
1792 obj = shmfd->shm_object;
1793 VM_OBJECT_WLOCK(obj);
1795 * XXXRW: This validation is probably insufficient, and subject to
1796 * sign errors. It should be fixed.
1798 if (offset >= shmfd->shm_size ||
1799 offset + size > round_page(shmfd->shm_size)) {
1800 VM_OBJECT_WUNLOCK(obj);
1804 shmfd->shm_kmappings++;
1805 vm_object_reference_locked(obj);
1806 VM_OBJECT_WUNLOCK(obj);
1808 /* Map the object into the kernel_map and wire it. */
1809 kva = vm_map_min(kernel_map);
1810 ofs = offset & PAGE_MASK;
1811 offset = trunc_page(offset);
1812 size = round_page(size + ofs);
1813 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1814 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1815 VM_PROT_READ | VM_PROT_WRITE, 0);
1816 if (rv == KERN_SUCCESS) {
1817 rv = vm_map_wire(kernel_map, kva, kva + size,
1818 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1819 if (rv == KERN_SUCCESS) {
1820 *memp = (void *)(kva + ofs);
1823 vm_map_remove(kernel_map, kva, kva + size);
1825 vm_object_deallocate(obj);
1827 /* On failure, drop our mapping reference. */
1828 VM_OBJECT_WLOCK(obj);
1829 shmfd->shm_kmappings--;
1830 VM_OBJECT_WUNLOCK(obj);
1832 return (vm_mmap_to_errno(rv));
1836 * We require the caller to unmap the entire entry. This allows us to
1837 * safely decrement shm_kmappings when a mapping is removed.
1840 shm_unmap(struct file *fp, void *mem, size_t size)
1842 struct shmfd *shmfd;
1843 vm_map_entry_t entry;
1844 vm_offset_t kva, ofs;
1852 if (fp->f_type != DTYPE_SHM)
1855 kva = (vm_offset_t)mem;
1856 ofs = kva & PAGE_MASK;
1857 kva = trunc_page(kva);
1858 size = round_page(size + ofs);
1860 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1861 &obj, &pindex, &prot, &wired);
1862 if (rv != KERN_SUCCESS)
1864 if (entry->start != kva || entry->end != kva + size) {
1865 vm_map_lookup_done(map, entry);
1868 vm_map_lookup_done(map, entry);
1869 if (obj != shmfd->shm_object)
1871 vm_map_remove(map, kva, kva + size);
1872 VM_OBJECT_WLOCK(obj);
1873 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1874 shmfd->shm_kmappings--;
1875 VM_OBJECT_WUNLOCK(obj);
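/*
 * Illustrative in-kernel use of the helpers above (a sketch; error handling
 * omitted):
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, 0, &mem);
 *	if (error == 0) {
 *		... access len bytes at mem ...
 *		error = shm_unmap(fp, mem, len);
 *	}
 */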
1880 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1882 const char *path, *pr_path;
1886 sx_assert(&shm_dict_lock, SA_LOCKED);
1887 kif->kf_type = KF_TYPE_SHM;
1888 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1889 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1890 if (shmfd->shm_path != NULL) {
1892 path = shmfd->shm_path;
1893 pr_path = curthread->td_ucred->cr_prison->pr_path;
1894 if (strcmp(pr_path, "/") != 0) {
1895 /* Return the jail-rooted pathname. */
1896 pr_pathlen = strlen(pr_path);
1897 visible = strncmp(path, pr_path, pr_pathlen)
1898 == 0 && path[pr_pathlen] == '/';
1899 if (list && !visible)
1904 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1911 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1912 struct filedesc *fdp __unused)
1916 sx_slock(&shm_dict_lock);
1917 res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1918 sx_sunlock(&shm_dict_lock);
1923 shm_add_seals(struct file *fp, int seals)
1925 struct shmfd *shmfd;
1927 vm_ooffset_t writemappings;
1932 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1934 /* Even already-set seals should result in EPERM. */
1935 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1939 nseals = seals & ~shmfd->shm_seals;
1940 if ((nseals & F_SEAL_WRITE) != 0) {
1941 if (shm_largepage(shmfd)) {
1947 * The rangelock above prevents writable mappings from being
1948 * added after we've started applying seals. The RLOCK here
1949 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1950 * writemappings will be done without a rangelock.
1952 VM_OBJECT_RLOCK(shmfd->shm_object);
1953 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1954 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1955 /* kmappings are also writable */
1956 if (writemappings > 0) {
1961 shmfd->shm_seals |= nseals;
1963 shm_rangelock_unlock(shmfd, rl_cookie);
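/*
 * Illustrative userland view of sealing (a sketch; memfd_create(2) and the
 * F_ADD_SEALS/F_GET_SEALS fcntl(2) commands are assumed here, not defined
 * in this file):
 *
 *	int fd;
 *
 *	fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	ftruncate(fd, 8192);		(rejected: F_SEAL_GROW is set)
 */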
1968 shm_get_seals(struct file *fp, int *seals)
1970 struct shmfd *shmfd;
1973 *seals = shmfd->shm_seals;
1978 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
1981 struct shmfd *shmfd;
1985 /* This assumes that the caller already checked for overflow. */
1988 size = offset + len;
1991 * Just grab the rangelock for the range that we may be attempting to
1992 * grow, rather than blocking read/write for regions we won't be
1993 * touching while this (potential) resize is in progress. Other
1994 * attempts to resize the shmfd will have to take a write lock from 0 to
1995 * OFF_MAX, so this being potentially beyond the current usable range of
1996 * the shmfd is not necessarily a concern. If other mechanisms are
1997 * added to grow a shmfd, this may need to be re-evaluated.
1999 rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2000 if (size > shmfd->shm_size)
2001 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2002 shm_rangelock_unlock(shmfd, rl_cookie);
2003 /* Translate to posix_fallocate(2) return value as needed. */
2004 if (error == ENOMEM)
2010 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2012 struct shm_mapping *shmm;
2014 struct kinfo_file kif;
2019 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2020 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2023 sx_slock(&shm_dict_lock);
2024 for (i = 0; i < shm_hash + 1; i++) {
2025 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2026 error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2028 if (error == EPERM) {
2035 error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2039 curlen += kif.kf_structsize;
2042 sx_sunlock(&shm_dict_lock);
2043 error2 = sbuf_finish(&sb);
2045 return (error != 0 ? error : error2);
2048 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2049 CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2050 NULL, 0, sysctl_posix_shm_list, "",
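/*
 * Userland consumers such as posixshmcontrol(1) read this list as a stream
 * of struct kinfo_file records, e.g. (illustrative)
 * sysctlbyname("kern.ipc.posix_shm_list", ...).
 */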
2054 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2055 struct filecaps *caps)
2058 return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2062 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2063 * caller, and libc will enforce it for the traditional shm_open() call. This
2064 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This
2065 * interface also includes a 'name' argument that is currently unused, but could
2066 * potentially be exported later via some interface for debugging purposes.
2067 * From the kernel's perspective, it is optional. Individual consumers like
2068 * memfd_create() may require it in order to be compatible with other systems
2069 * implementing the same function.
2072 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2075 return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2076 uap->shmflags, NULL, uap->name));