1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5  * Copyright 2020 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by BAE Systems, the University of
9  * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10  * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11  * Computing (TC) research program.
12  *
13  * Portions of this software were developed by Konstantin Belousov
14  * under sponsorship from the FreeBSD Foundation.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37
38 /*
39  * Support for shared swap-backed anonymous memory objects via
40  * shm_open(2), shm_rename(2), and shm_unlink(2).
41  * While most of the implementation is here, the corresponding
42  * mapping logic is in vm_mmap.c.
43  *
44  * posixshmcontrol(1) allows users to inspect the state of the memory
45  * objects.  The per-uid swap resource limit controls the total amount
46  * of memory that a user can consume for anonymous objects, including
47  * shared ones.
48  */
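/*
 * A minimal userspace sketch of the lifecycle implemented below
 * (illustrative only: the "/myshm" path and 4096-byte size are
 * arbitrary, error handling is omitted; needs <sys/mman.h>,
 * <fcntl.h>, and <unistd.h>):
 *
 *	int fd = shm_open("/myshm", O_CREAT | O_RDWR, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	...
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/myshm");
 */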
49
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52
53 #include "opt_capsicum.h"
54 #include "opt_ktrace.h"
55
56 #include <sys/param.h>
57 #include <sys/capsicum.h>
58 #include <sys/conf.h>
59 #include <sys/fcntl.h>
60 #include <sys/file.h>
61 #include <sys/filedesc.h>
62 #include <sys/filio.h>
63 #include <sys/fnv_hash.h>
64 #include <sys/kernel.h>
65 #include <sys/limits.h>
66 #include <sys/uio.h>
67 #include <sys/signal.h>
68 #include <sys/jail.h>
69 #include <sys/ktrace.h>
70 #include <sys/lock.h>
71 #include <sys/malloc.h>
72 #include <sys/mman.h>
73 #include <sys/mutex.h>
74 #include <sys/priv.h>
75 #include <sys/proc.h>
76 #include <sys/refcount.h>
77 #include <sys/resourcevar.h>
78 #include <sys/rwlock.h>
79 #include <sys/sbuf.h>
80 #include <sys/stat.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysproto.h>
84 #include <sys/systm.h>
85 #include <sys/sx.h>
86 #include <sys/time.h>
87 #include <sys/vmmeter.h>
88 #include <sys/vnode.h>
89 #include <sys/unistd.h>
90 #include <sys/user.h>
91
92 #include <security/audit/audit.h>
93 #include <security/mac/mac_framework.h>
94
95 #include <vm/vm.h>
96 #include <vm/vm_param.h>
97 #include <vm/pmap.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_pager.h>
105 #include <vm/swap_pager.h>
106
107 struct shm_mapping {
108         char            *sm_path;
109         Fnv32_t         sm_fnv;
110         struct shmfd    *sm_shmfd;
111         LIST_ENTRY(shm_mapping) sm_link;
112 };
113
114 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
115 static LIST_HEAD(, shm_mapping) *shm_dictionary;
116 static struct sx shm_dict_lock;
117 static struct mtx shm_timestamp_lock;
118 static u_long shm_hash;
119 static struct unrhdr64 shm_ino_unr;
120 static dev_t shm_dev_ino;
121
122 #define SHM_HASH(fnv)   (&shm_dictionary[(fnv) & shm_hash])
123
124 static void     shm_init(void *arg);
125 static void     shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
126 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
127 static int      shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
128 static void     shm_doremove(struct shm_mapping *map);
129 static int      shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
130     void *rl_cookie);
131 static int      shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
132     void *rl_cookie);
133 static int      shm_copyin_path(struct thread *td, const char *userpath_in,
134     char **path_out);
135
136 static fo_rdwr_t        shm_read;
137 static fo_rdwr_t        shm_write;
138 static fo_truncate_t    shm_truncate;
139 static fo_ioctl_t       shm_ioctl;
140 static fo_stat_t        shm_stat;
141 static fo_close_t       shm_close;
142 static fo_chmod_t       shm_chmod;
143 static fo_chown_t       shm_chown;
144 static fo_seek_t        shm_seek;
145 static fo_fill_kinfo_t  shm_fill_kinfo;
146 static fo_mmap_t        shm_mmap;
147 static fo_get_seals_t   shm_get_seals;
148 static fo_add_seals_t   shm_add_seals;
149 static fo_fallocate_t   shm_fallocate;
150
151 /* File descriptor operations. */
152 struct fileops shm_ops = {
153         .fo_read = shm_read,
154         .fo_write = shm_write,
155         .fo_truncate = shm_truncate,
156         .fo_ioctl = shm_ioctl,
157         .fo_poll = invfo_poll,
158         .fo_kqfilter = invfo_kqfilter,
159         .fo_stat = shm_stat,
160         .fo_close = shm_close,
161         .fo_chmod = shm_chmod,
162         .fo_chown = shm_chown,
163         .fo_sendfile = vn_sendfile,
164         .fo_seek = shm_seek,
165         .fo_fill_kinfo = shm_fill_kinfo,
166         .fo_mmap = shm_mmap,
167         .fo_get_seals = shm_get_seals,
168         .fo_add_seals = shm_add_seals,
169         .fo_fallocate = shm_fallocate,
170         .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
171 };
172
173 FEATURE(posix_shm, "POSIX shared memory");
174
175 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
176     "");
177
178 static int largepage_reclaim_tries = 1;
179 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
180     CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
181     "Number of contig reclaims before giving up for default alloc policy");
182
183 static int
184 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
185 {
186         vm_page_t m;
187         vm_pindex_t idx;
188         size_t tlen;
189         int error, offset, rv;
190
191         idx = OFF_TO_IDX(uio->uio_offset);
192         offset = uio->uio_offset & PAGE_MASK;
193         tlen = MIN(PAGE_SIZE - offset, len);
194
195         rv = vm_page_grab_valid_unlocked(&m, obj, idx,
196             VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
197         if (rv == VM_PAGER_OK)
198                 goto found;
199
200         /*
201          * Read I/O without either a corresponding resident page or swap
202          * page: use zero_region.  This is intended to avoid instantiating
203          * pages on read from a sparse region.
204          */
205         VM_OBJECT_WLOCK(obj);
206         m = vm_page_lookup(obj, idx);
207         if (uio->uio_rw == UIO_READ && m == NULL &&
208             !vm_pager_has_page(obj, idx, NULL, NULL)) {
209                 VM_OBJECT_WUNLOCK(obj);
210                 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
211         }
212
213         /*
214          * Although the tmpfs vnode lock is held here, it is
215          * nonetheless safe to sleep waiting for a free page.  The
216          * pageout daemon does not need to acquire the tmpfs vnode
217          * lock to page out the object's pages because obj is an
218          * OBJT_SWAP type object.
219          */
220         rv = vm_page_grab_valid(&m, obj, idx,
221             VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
222         if (rv != VM_PAGER_OK) {
223                 VM_OBJECT_WUNLOCK(obj);
224                 if (bootverbose) {
225                         printf("uiomove_object: vm_obj %p idx %jd "
226                             "pager error %d\n", obj, idx, rv);
227                 }
228                 return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
229         }
230         VM_OBJECT_WUNLOCK(obj);
231
232 found:
233         error = uiomove_fromphys(&m, offset, tlen, uio);
234         if (uio->uio_rw == UIO_WRITE && error == 0)
235                 vm_page_set_dirty(m);
236         vm_page_activate(m);
237         vm_page_sunbusy(m);
238
239         return (error);
240 }
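/*
 * The zero_region shortcut above is visible from userspace: reading a
 * hole in a sparse shm object returns zeros without instantiating any
 * pages.  A hypothetical demonstration (sizes arbitrary, error
 * handling omitted):
 *
 *	char buf[4096];
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *	ftruncate(fd, 1024 * 1024);
 *	pread(fd, buf, sizeof(buf), 0);    (returns zeros; no page is
 *	                                    allocated for the hole)
 */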
241
242 int
243 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
244 {
245         ssize_t resid;
246         size_t len;
247         int error;
248
249         error = 0;
250         while ((resid = uio->uio_resid) > 0) {
251                 if (obj_size <= uio->uio_offset)
252                         break;
253                 len = MIN(obj_size - uio->uio_offset, resid);
254                 if (len == 0)
255                         break;
256                 error = uiomove_object_page(obj, len, uio);
257                 if (error != 0 || resid == uio->uio_resid)
258                         break;
259         }
260         return (error);
261 }
262
263 static u_long count_largepages[MAXPAGESIZES];
264
265 static int
266 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
267     int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
268 {
269         vm_page_t m __diagused;
270         int psind;
271
272         psind = object->un_pager.phys.data_val;
273         if (psind == 0 || pidx >= object->size)
274                 return (VM_PAGER_FAIL);
275         *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
276
277         /*
278          * We only busy the first page in the superpage run.  It is
279          * useless to busy the whole run since we only remove full
280          * superpages, and it takes too long to busy e.g. 512 * 512 ==
281          * 262144 pages constituting a 1G amd64 superpage.
282          */
283         m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
284         MPASS(m != NULL);
285
286         *last = *first + atop(pagesizes[psind]) - 1;
287         return (VM_PAGER_OK);
288 }
289
290 static boolean_t
291 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
292     int *before, int *after)
293 {
294         int psind;
295
296         psind = object->un_pager.phys.data_val;
297         if (psind == 0 || pindex >= object->size)
298                 return (FALSE);
299         if (before != NULL) {
300                 *before = pindex - rounddown2(pindex, pagesizes[psind] /
301                     PAGE_SIZE);
302         }
303         if (after != NULL) {
304                 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
305                     pindex;
306         }
307         return (TRUE);
308 }
309
310 static void
311 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
312     vm_ooffset_t foff, struct ucred *cred)
313 {
314 }
315
316 static void
317 shm_largepage_phys_dtor(vm_object_t object)
318 {
319         int psind;
320
321         psind = object->un_pager.phys.data_val;
322         if (psind != 0) {
323                 atomic_subtract_long(&count_largepages[psind],
324                     object->size / (pagesizes[psind] / PAGE_SIZE));
325                 vm_wire_sub(object->size);
326         } else {
327                 KASSERT(object->size == 0,
328                     ("largepage phys obj %p not initialized but size %#jx > 0",
329                     object, (uintmax_t)object->size));
330         }
331 }
332
333 static const struct phys_pager_ops shm_largepage_phys_ops = {
334         .phys_pg_populate =     shm_largepage_phys_populate,
335         .phys_pg_haspage =      shm_largepage_phys_haspage,
336         .phys_pg_ctor =         shm_largepage_phys_ctor,
337         .phys_pg_dtor =         shm_largepage_phys_dtor,
338 };
339
340 bool
341 shm_largepage(struct shmfd *shmfd)
342 {
343         return (shmfd->shm_object->type == OBJT_PHYS);
344 }
345
346 static int
347 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
348 {
349         struct shmfd *shmfd;
350         off_t foffset;
351         int error;
352
353         shmfd = fp->f_data;
354         foffset = foffset_lock(fp, 0);
355         error = 0;
356         switch (whence) {
357         case L_INCR:
358                 if (foffset < 0 ||
359                     (offset > 0 && foffset > OFF_MAX - offset)) {
360                         error = EOVERFLOW;
361                         break;
362                 }
363                 offset += foffset;
364                 break;
365         case L_XTND:
366                 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
367                         error = EOVERFLOW;
368                         break;
369                 }
370                 offset += shmfd->shm_size;
371                 break;
372         case L_SET:
373                 break;
374         default:
375                 error = EINVAL;
376         }
377         if (error == 0) {
378                 if (offset < 0 || offset > shmfd->shm_size)
379                         error = EINVAL;
380                 else
381                         td->td_uretoff.tdu_off = offset;
382         }
383         foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
384         return (error);
385 }
386
387 static int
388 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
389     int flags, struct thread *td)
390 {
391         struct shmfd *shmfd;
392         void *rl_cookie;
393         int error;
394
395         shmfd = fp->f_data;
396 #ifdef MAC
397         error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
398         if (error)
399                 return (error);
400 #endif
401         foffset_lock_uio(fp, uio, flags);
402         rl_cookie = rangelock_rlock(&shmfd->shm_rl, uio->uio_offset,
403             uio->uio_offset + uio->uio_resid, &shmfd->shm_mtx);
404         error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
405         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
406         foffset_unlock_uio(fp, uio, flags);
407         return (error);
408 }
409
410 static int
411 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
412     int flags, struct thread *td)
413 {
414         struct shmfd *shmfd;
415         void *rl_cookie;
416         int error;
417         off_t size;
418
419         shmfd = fp->f_data;
420 #ifdef MAC
421         error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
422         if (error)
423                 return (error);
424 #endif
425         if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
426                 return (EINVAL);
427         foffset_lock_uio(fp, uio, flags);
428         if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
429                 /*
430                  * Overflow is only an error if we're supposed to expand on
431                  * write.  Otherwise, we'll just truncate the write to the
432                  * size of the file, which can only grow up to OFF_MAX.
433                  */
434                 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
435                         foffset_unlock_uio(fp, uio, flags);
436                         return (EFBIG);
437                 }
438
439                 size = shmfd->shm_size;
440         } else {
441                 size = uio->uio_offset + uio->uio_resid;
442         }
443         if ((flags & FOF_OFFSET) == 0) {
444                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
445                     &shmfd->shm_mtx);
446         } else {
447                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, uio->uio_offset,
448                     size, &shmfd->shm_mtx);
449         }
450         if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
451                 error = EPERM;
452         } else {
453                 error = 0;
454                 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
455                     size > shmfd->shm_size) {
456                         error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
457                 }
458                 if (error == 0)
459                         error = uiomove_object(shmfd->shm_object,
460                             shmfd->shm_size, uio);
461         }
462         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
463         foffset_unlock_uio(fp, uio, flags);
464         return (error);
465 }
466
467 static int
468 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
469     struct thread *td)
470 {
471         struct shmfd *shmfd;
472 #ifdef MAC
473         int error;
474 #endif
475
476         shmfd = fp->f_data;
477 #ifdef MAC
478         error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
479         if (error)
480                 return (error);
481 #endif
482         return (shm_dotruncate(shmfd, length));
483 }
484
485 static int
486 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
487     struct thread *td)
488 {
489         struct shmfd *shmfd;
490         struct shm_largepage_conf *conf;
491         void *rl_cookie;
492
493         shmfd = fp->f_data;
494         switch (com) {
495         case FIONBIO:
496         case FIOASYNC:
497                 /*
498                  * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
499                  * just like it would on an unlinked regular file
500                  * just like it would on an unlinked regular file.
501                 return (0);
502         case FIOSSHMLPGCNF:
503                 if (!shm_largepage(shmfd))
504                         return (ENOTTY);
505                 conf = data;
506                 if (shmfd->shm_lp_psind != 0 &&
507                     conf->psind != shmfd->shm_lp_psind)
508                         return (EINVAL);
509                 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
510                     pagesizes[conf->psind] == 0)
511                         return (EINVAL);
512                 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
513                     conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
514                     conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
515                         return (EINVAL);
516
517                 rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
518                     &shmfd->shm_mtx);
519                 shmfd->shm_lp_psind = conf->psind;
520                 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
521                 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
522                 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
523                 return (0);
524         case FIOGSHMLPGCNF:
525                 if (!shm_largepage(shmfd))
526                         return (ENOTTY);
527                 conf = data;
528                 rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, OFF_MAX,
529                     &shmfd->shm_mtx);
530                 conf->psind = shmfd->shm_lp_psind;
531                 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
532                 rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
533                 return (0);
534         default:
535                 return (ENOTTY);
536         }
537 }
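/*
 * A hypothetical userspace sketch of driving FIOSSHMLPGCNF directly;
 * in practice a libc wrapper (shm_create_largepage) performs these
 * steps.  The fd must have been created with SHM_LARGEPAGE (otherwise
 * ENOTTY), and psind 1 assumes pagesizes[1] is a valid superpage size,
 * e.g. 2M on amd64:
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	if (ioctl(fd, FIOSSHMLPGCNF, &conf) == -1)
 *		err(1, "FIOSSHMLPGCNF");
 */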
538
539 static int
540 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
541     struct thread *td)
542 {
543         struct shmfd *shmfd;
544 #ifdef MAC
545         int error;
546 #endif
547
548         shmfd = fp->f_data;
549
550 #ifdef MAC
551         error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
552         if (error)
553                 return (error);
554 #endif
555
556         /*
557          * Attempt to return sane-ish values for fstat() on a memory file
558          * descriptor.
559          */
560         bzero(sb, sizeof(*sb));
561         sb->st_blksize = PAGE_SIZE;
562         sb->st_size = shmfd->shm_size;
564         mtx_lock(&shm_timestamp_lock);
565         sb->st_atim = shmfd->shm_atime;
566         sb->st_ctim = shmfd->shm_ctime;
567         sb->st_mtim = shmfd->shm_mtime;
568         sb->st_birthtim = shmfd->shm_birthtime;
569         sb->st_mode = S_IFREG | shmfd->shm_mode;                /* XXX */
570         sb->st_uid = shmfd->shm_uid;
571         sb->st_gid = shmfd->shm_gid;
572         mtx_unlock(&shm_timestamp_lock);
573         sb->st_dev = shm_dev_ino;
574         sb->st_ino = shmfd->shm_ino;
575         sb->st_nlink = shmfd->shm_object->ref_count;
576         sb->st_blocks = shmfd->shm_object->size /
577             (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
578
579         return (0);
580 }
581
582 static int
583 shm_close(struct file *fp, struct thread *td)
584 {
585         struct shmfd *shmfd;
586
587         shmfd = fp->f_data;
588         fp->f_data = NULL;
589         shm_drop(shmfd);
590
591         return (0);
592 }
593
594 static int
595 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
596         int error;
597         char *path;
598         const char *pr_path;
599         size_t pr_pathlen;
600
601         path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
602         pr_path = td->td_ucred->cr_prison->pr_path;
603
604         /* Construct a full pathname for jailed callers. */
605         pr_pathlen = strcmp(pr_path, "/") ==
606             0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
607         error = copyinstr(userpath_in, path + pr_pathlen,
608             MAXPATHLEN - pr_pathlen, NULL);
609         if (error != 0)
610                 goto out;
611
612 #ifdef KTRACE
613         if (KTRPOINT(curthread, KTR_NAMEI))
614                 ktrnamei(path);
615 #endif
616
617         /* Require paths to start with a '/' character. */
618         if (path[pr_pathlen] != '/') {
619                 error = EINVAL;
620                 goto out;
621         }
622
623         *path_out = path;
624
625 out:
626         if (error != 0)
627                 free(path, M_SHMFD);
628
629         return (error);
630 }
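/*
 * Worked example of the jail prefixing above (values illustrative):
 * for a caller jailed with pr_path "/jails/a", a userpath of "/myshm"
 * yields the dictionary key "/jails/a/myshm"; an unjailed caller
 * (pr_path "/") uses "/myshm" unchanged.  Because the '/' check is
 * applied past the prefix, a relative userpath fails with EINVAL in
 * both cases.
 */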
631
632 static int
633 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
634 {
635         vm_object_t object;
636         vm_page_t m;
637         vm_pindex_t idx, nobjsize;
638         vm_ooffset_t delta;
639         int base, rv;
640
641         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
642         object = shmfd->shm_object;
643         VM_OBJECT_ASSERT_WLOCKED(object);
644         rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
645         if (length == shmfd->shm_size)
646                 return (0);
647         nobjsize = OFF_TO_IDX(length + PAGE_MASK);
648
649         /* Are we shrinking?  If so, trim the end. */
650         if (length < shmfd->shm_size) {
651                 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
652                         return (EPERM);
653
654                 /*
655                  * Disallow any requests to shrink the size if this
656                  * object is mapped into the kernel.
657                  */
658                 if (shmfd->shm_kmappings > 0)
659                         return (EBUSY);
660
661                 /*
662                  * Zero the truncated part of the last page.
663                  */
664                 base = length & PAGE_MASK;
665                 if (base != 0) {
666                         idx = OFF_TO_IDX(length);
667 retry:
668                         m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
669                         if (m != NULL) {
670                                 MPASS(vm_page_all_valid(m));
671                         } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
672                                 m = vm_page_alloc(object, idx,
673                                     VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
674                                 if (m == NULL)
675                                         goto retry;
676                                 vm_object_pip_add(object, 1);
677                                 VM_OBJECT_WUNLOCK(object);
678                                 rv = vm_pager_get_pages(object, &m, 1, NULL,
679                                     NULL);
680                                 VM_OBJECT_WLOCK(object);
681                                 vm_object_pip_wakeup(object);
682                                 if (rv == VM_PAGER_OK) {
683                                         /*
684                                          * Since the page was not resident,
685                                          * and therefore not recently
686                                          * accessed, immediately enqueue it
687                                          * for asynchronous laundering.  The
688                                          * current operation is not regarded
689                                          * as an access.
690                                          */
691                                         vm_page_launder(m);
692                                 } else {
693                                         vm_page_free(m);
694                                         VM_OBJECT_WUNLOCK(object);
695                                         return (EIO);
696                                 }
697                         }
698                         if (m != NULL) {
699                                 pmap_zero_page_area(m, base, PAGE_SIZE - base);
700                                 KASSERT(vm_page_all_valid(m),
701                                     ("shm_dotruncate: page %p is invalid", m));
702                                 vm_page_set_dirty(m);
703                                 vm_page_xunbusy(m);
704                         }
705                 }
706                 delta = IDX_TO_OFF(object->size - nobjsize);
707
708                 if (nobjsize < object->size)
709                         vm_object_page_remove(object, nobjsize, object->size,
710                             0);
711
712                 /* Release the swap accounted for the shm. */
713                 swap_release_by_cred(delta, object->cred);
714                 object->charge -= delta;
715         } else {
716                 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
717                         return (EPERM);
718
719                 /* Try to reserve additional swap space. */
720                 delta = IDX_TO_OFF(nobjsize - object->size);
721                 if (!swap_reserve_by_cred(delta, object->cred))
722                         return (ENOMEM);
723                 object->charge += delta;
724         }
725         shmfd->shm_size = length;
726         mtx_lock(&shm_timestamp_lock);
727         vfs_timestamp(&shmfd->shm_ctime);
728         shmfd->shm_mtime = shmfd->shm_ctime;
729         mtx_unlock(&shm_timestamp_lock);
730         object->size = nobjsize;
731         return (0);
732 }
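/*
 * Worked example of the swap-charge arithmetic above, assuming a
 * 4096-byte PAGE_SIZE: growing a 3-page (12288-byte) object to length
 * 20000 gives nobjsize = OFF_TO_IDX(20000 + PAGE_MASK) = 5 pages, so
 * delta = IDX_TO_OFF(5 - 3) = 8192 bytes of additional swap is
 * reserved against object->cred; shrinking releases the corresponding
 * amount through swap_release_by_cred().
 */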
733
734 static int
735 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
736 {
737         vm_object_t object;
738         vm_page_t m;
739         vm_pindex_t newobjsz;
740         vm_pindex_t oldobjsz __unused;
741         int aflags, error, i, psind, try;
742
743         KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
744         object = shmfd->shm_object;
745         VM_OBJECT_ASSERT_WLOCKED(object);
746         rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
747
748         oldobjsz = object->size;
749         newobjsz = OFF_TO_IDX(length);
750         if (length == shmfd->shm_size)
751                 return (0);
752         psind = shmfd->shm_lp_psind;
753         if (psind == 0 && length != 0)
754                 return (EINVAL);
755         if ((length & (pagesizes[psind] - 1)) != 0)
756                 return (EINVAL);
757
758         if (length < shmfd->shm_size) {
759                 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
760                         return (EPERM);
761                 if (shmfd->shm_kmappings > 0)
762                         return (EBUSY);
763                 return (ENOTSUP);       /* Pages are unmanaged. */
764 #if 0
765                 vm_object_page_remove(object, newobjsz, oldobjsz, 0);
766                 object->size = newobjsz;
767                 shmfd->shm_size = length;
768                 return (0);
769 #endif
770         }
771
772         if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
773                 return (EPERM);
774
775         aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
776         if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
777                 aflags |= VM_ALLOC_WAITFAIL;
778         try = 0;
779
780         /*
781          * Extend the shmfd and object, keeping all already fully
782          * allocated large pages intact even on error, because the
783          * dropped object lock might have allowed them to be mapped.
784          */
785         while (object->size < newobjsz) {
786                 m = vm_page_alloc_contig(object, object->size, aflags,
787                     pagesizes[psind] / PAGE_SIZE, 0, ~0,
788                     pagesizes[psind], 0,
789                     VM_MEMATTR_DEFAULT);
790                 if (m == NULL) {
791                         VM_OBJECT_WUNLOCK(object);
792                         if (shmfd->shm_lp_alloc_policy ==
793                             SHM_LARGEPAGE_ALLOC_NOWAIT ||
794                             (shmfd->shm_lp_alloc_policy ==
795                             SHM_LARGEPAGE_ALLOC_DEFAULT &&
796                             try >= largepage_reclaim_tries)) {
797                                 VM_OBJECT_WLOCK(object);
798                                 return (ENOMEM);
799                         }
800                         error = vm_page_reclaim_contig(aflags,
801                             pagesizes[psind] / PAGE_SIZE, 0, ~0,
802                             pagesizes[psind], 0) ? 0 :
803                             vm_wait_intr(object);
804                         if (error != 0) {
805                                 VM_OBJECT_WLOCK(object);
806                                 return (error);
807                         }
808                         try++;
809                         VM_OBJECT_WLOCK(object);
810                         continue;
811                 }
812                 try = 0;
813                 for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
814                         if ((m[i].flags & PG_ZERO) == 0)
815                                 pmap_zero_page(&m[i]);
816                         vm_page_valid(&m[i]);
817                         vm_page_xunbusy(&m[i]);
818                 }
819                 object->size += OFF_TO_IDX(pagesizes[psind]);
820                 shmfd->shm_size += pagesizes[psind];
821                 atomic_add_long(&count_largepages[psind], 1);
822                 vm_wire_add(atop(pagesizes[psind]));
823         }
824         return (0);
825 }
826
827 static int
828 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
829 {
830         int error;
831
832         VM_OBJECT_WLOCK(shmfd->shm_object);
833         error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
834             length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
835             rl_cookie);
836         VM_OBJECT_WUNLOCK(shmfd->shm_object);
837         return (error);
838 }
839
840 int
841 shm_dotruncate(struct shmfd *shmfd, off_t length)
842 {
843         void *rl_cookie;
844         int error;
845
846         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
847             &shmfd->shm_mtx);
848         error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
849         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
850         return (error);
851 }
852
853 /*
854  * shmfd object management including creation and reference counting
855  * routines.
856  */
857 struct shmfd *
858 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
859 {
860         struct shmfd *shmfd;
861
862         shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
863         shmfd->shm_size = 0;
864         shmfd->shm_uid = ucred->cr_uid;
865         shmfd->shm_gid = ucred->cr_gid;
866         shmfd->shm_mode = mode;
867         if (largepage) {
868                 shmfd->shm_object = phys_pager_allocate(NULL,
869                     &shm_largepage_phys_ops, NULL, shmfd->shm_size,
870                     VM_PROT_DEFAULT, 0, ucred);
871                 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
872         } else {
873                 shmfd->shm_object = vm_pager_allocate(OBJT_SWAP, NULL,
874                     shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
875         }
876         KASSERT(shmfd->shm_object != NULL, ("shm_create: vm_pager_allocate"));
877         vfs_timestamp(&shmfd->shm_birthtime);
878         shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
879             shmfd->shm_birthtime;
880         shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
881         refcount_init(&shmfd->shm_refs, 1);
882         mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
883         rangelock_init(&shmfd->shm_rl);
884 #ifdef MAC
885         mac_posixshm_init(shmfd);
886         mac_posixshm_create(ucred, shmfd);
887 #endif
888
889         return (shmfd);
890 }
891
892 struct shmfd *
893 shm_hold(struct shmfd *shmfd)
894 {
895
896         refcount_acquire(&shmfd->shm_refs);
897         return (shmfd);
898 }
899
900 void
901 shm_drop(struct shmfd *shmfd)
902 {
903
904         if (refcount_release(&shmfd->shm_refs)) {
905 #ifdef MAC
906                 mac_posixshm_destroy(shmfd);
907 #endif
908                 rangelock_destroy(&shmfd->shm_rl);
909                 mtx_destroy(&shmfd->shm_mtx);
910                 vm_object_deallocate(shmfd->shm_object);
911                 free(shmfd, M_SHMFD);
912         }
913 }
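/*
 * A minimal in-kernel sketch of the lifecycle above (error handling
 * omitted; the mode and largepage arguments are illustrative):
 *
 *	struct shmfd *sf;
 *
 *	sf = shm_alloc(td->td_ucred, 0600, false);  (refcount is 1)
 *	shm_hold(sf);                               (refcount is 2)
 *	shm_drop(sf);                               (refcount back to 1)
 *	shm_drop(sf);   (final drop: destroys the rangelock and mutex,
 *	                 deallocates shm_object, frees the shmfd)
 */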
914
915 /*
916  * Determine if the credentials have sufficient permissions for a
917  * specified combination of FREAD and FWRITE.
918  */
919 int
920 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
921 {
922         accmode_t accmode;
923         int error;
924
925         accmode = 0;
926         if (flags & FREAD)
927                 accmode |= VREAD;
928         if (flags & FWRITE)
929                 accmode |= VWRITE;
930         mtx_lock(&shm_timestamp_lock);
931         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
932             accmode, ucred);
933         mtx_unlock(&shm_timestamp_lock);
934         return (error);
935 }
936
937 static void
938 shm_init(void *arg)
939 {
940         char name[32];
941         int i;
942
943         mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
944         sx_init(&shm_dict_lock, "shm dictionary");
945         shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
946         new_unrhdr64(&shm_ino_unr, 1);
947         shm_dev_ino = devfs_alloc_cdp_inode();
948         KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
949
950         for (i = 1; i < MAXPAGESIZES; i++) {
951                 if (pagesizes[i] == 0)
952                         break;
953 #define M       (1024 * 1024)
954 #define G       (1024 * M)
955                 if (pagesizes[i] >= G)
956                         snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
957                 else if (pagesizes[i] >= M)
958                         snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
959                 else
960                         snprintf(name, sizeof(name), "%lu", pagesizes[i]);
961 #undef G
962 #undef M
963                 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
964                     OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
965                     "number of non-transient largepages allocated");
966         }
967 }
968 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
969
970 /*
971  * Remove all shared memory objects that belong to a prison.
972  */
973 void
974 shm_remove_prison(struct prison *pr)
975 {
976         struct shm_mapping *shmm, *tshmm;
977         u_long i;
978
979         sx_xlock(&shm_dict_lock);
980         for (i = 0; i < shm_hash + 1; i++) {
981                 LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
982                         if (shmm->sm_shmfd->shm_object->cred &&
983                             shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
984                                 shm_doremove(shmm);
985                 }
986         }
987         sx_xunlock(&shm_dict_lock);
988 }
989
990 /*
991  * Dictionary management.  We maintain an in-kernel dictionary to map
992  * paths to shmfd objects.  We use the FNV hash on the path to store
993  * the mappings in a hash table.
994  */
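/*
 * Lookup sketch, as driven by kern_shm_open2() and sys_shm_unlink()
 * below (the dictionary lock must be held across the lookup and any
 * subsequent insert/remove):
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	...
 *	sx_xunlock(&shm_dict_lock);
 */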
995 static struct shmfd *
996 shm_lookup(char *path, Fnv32_t fnv)
997 {
998         struct shm_mapping *map;
999
1000         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1001                 if (map->sm_fnv != fnv)
1002                         continue;
1003                 if (strcmp(map->sm_path, path) == 0)
1004                         return (map->sm_shmfd);
1005         }
1006
1007         return (NULL);
1008 }
1009
1010 static void
1011 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1012 {
1013         struct shm_mapping *map;
1014
1015         map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1016         map->sm_path = path;
1017         map->sm_fnv = fnv;
1018         map->sm_shmfd = shm_hold(shmfd);
1019         shmfd->shm_path = path;
1020         LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1021 }
1022
1023 static int
1024 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1025 {
1026         struct shm_mapping *map;
1027         int error;
1028
1029         LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1030                 if (map->sm_fnv != fnv)
1031                         continue;
1032                 if (strcmp(map->sm_path, path) == 0) {
1033 #ifdef MAC
1034                         error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1035                         if (error)
1036                                 return (error);
1037 #endif
1038                         error = shm_access(map->sm_shmfd, ucred,
1039                             FREAD | FWRITE);
1040                         if (error)
1041                                 return (error);
1042                         shm_doremove(map);
1043                         return (0);
1044                 }
1045         }
1046
1047         return (ENOENT);
1048 }
1049
1050 static void
1051 shm_doremove(struct shm_mapping *map)
1052 {
1053         map->sm_shmfd->shm_path = NULL;
1054         LIST_REMOVE(map, sm_link);
1055         shm_drop(map->sm_shmfd);
1056         free(map->sm_path, M_SHMFD);
1057         free(map, M_SHMFD);
1058 }
1059
1060 int
1061 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1062     int shmflags, struct filecaps *fcaps, const char *name __unused)
1063 {
1064         struct pwddesc *pdp;
1065         struct shmfd *shmfd;
1066         struct file *fp;
1067         char *path;
1068         void *rl_cookie;
1069         Fnv32_t fnv;
1070         mode_t cmode;
1071         int error, fd, initial_seals;
1072         bool largepage;
1073
1074         if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1075             SHM_LARGEPAGE)) != 0)
1076                 return (EINVAL);
1077
1078         initial_seals = F_SEAL_SEAL;
1079         if ((shmflags & SHM_ALLOW_SEALING) != 0)
1080                 initial_seals &= ~F_SEAL_SEAL;
1081
1082 #ifdef CAPABILITY_MODE
1083         /*
1084          * shm_open(2) is only allowed for anonymous objects.
1085          */
1086         if (IN_CAPABILITY_MODE(td) && (userpath != SHM_ANON))
1087                 return (ECAPMODE);
1088 #endif
1089
1090         AUDIT_ARG_FFLAGS(flags);
1091         AUDIT_ARG_MODE(mode);
1092
1093         if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1094                 return (EINVAL);
1095
1096         if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1097                 return (EINVAL);
1098
1099         largepage = (shmflags & SHM_LARGEPAGE) != 0;
1100         if (largepage && !PMAP_HAS_LARGEPAGES)
1101                 return (ENOTTY);
1102
1103         /*
1104          * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1105          * If the decision is made later to allow additional seals, care must be
1106          * taken below to ensure that the seals are properly set if the shmfd
1107          * already existed -- this currently assumes that only F_SEAL_SEAL can
1108          * be set and doesn't take further precautions to ensure the validity of
1109          * the seals being added with respect to current mappings.
1110          */
1111         if ((initial_seals & ~F_SEAL_SEAL) != 0)
1112                 return (EINVAL);
1113
1114         pdp = td->td_proc->p_pd;
1115         cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1116
1117         /*
1118          * A shm_open(2)-created shm should always have O_CLOEXEC set, as
1119          * mandated by POSIX.  We allow it to be unset here so that an in-kernel
1120          * interface may be written as a thin layer around shm, optionally not
1121          * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
1122          * in sys_shm_open() to keep this implementation compliant.
1123          */
1124         error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1125         if (error)
1126                 return (error);
1127
1128         /* A SHM_ANON path pointer creates an anonymous object. */
1129         if (userpath == SHM_ANON) {
1130                 /* A read-only anonymous object is pointless. */
1131                 if ((flags & O_ACCMODE) == O_RDONLY) {
1132                         fdclose(td, fp, fd);
1133                         fdrop(fp, td);
1134                         return (EINVAL);
1135                 }
1136                 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1137                 shmfd->shm_seals = initial_seals;
1138                 shmfd->shm_flags = shmflags;
1139         } else {
1140                 error = shm_copyin_path(td, userpath, &path);
1141                 if (error != 0) {
1142                         fdclose(td, fp, fd);
1143                         fdrop(fp, td);
1144                         return (error);
1145                 }
1146
1147                 AUDIT_ARG_UPATH1_CANON(path);
1148                 fnv = fnv_32_str(path, FNV1_32_INIT);
1149                 sx_xlock(&shm_dict_lock);
1150                 shmfd = shm_lookup(path, fnv);
1151                 if (shmfd == NULL) {
1152                         /* Object does not yet exist, create it if requested. */
1153                         if (flags & O_CREAT) {
1154 #ifdef MAC
1155                                 error = mac_posixshm_check_create(td->td_ucred,
1156                                     path);
1157                                 if (error == 0) {
1158 #endif
1159                                         shmfd = shm_alloc(td->td_ucred, cmode,
1160                                             largepage);
1161                                         shmfd->shm_seals = initial_seals;
1162                                         shmfd->shm_flags = shmflags;
1163                                         shm_insert(path, fnv, shmfd);
1164 #ifdef MAC
1165                                 }
1166 #endif
1167                         } else {
1168                                 free(path, M_SHMFD);
1169                                 error = ENOENT;
1170                         }
1171                 } else {
1172                         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1173                             &shmfd->shm_mtx);
1174
1175                         /*
1176                          * kern_shm_open() likely shouldn't ever error out on
1177                          * trying to set a seal that already exists, unlike
1178                          * F_ADD_SEALS.  This would break terribly as
1179                          * shm_open(2) actually sets F_SEAL_SEAL to maintain
1180                          * historical behavior where the underlying file could
1181                          * not be sealed.
1182                          */
1183                         initial_seals &= ~shmfd->shm_seals;
1184
1185                         /*
1186                          * Object already exists, obtain a new
1187                          * reference if requested and permitted.
1188                          */
1189                         free(path, M_SHMFD);
1190
1191                         /*
1192                          * initial_seals can't set additional seals if
1193                          * F_SEAL_SEAL has already been set.  If F_SEAL_SEAL is set,
1194                          * then we've already removed that one from
1195                          * initial_seals.  This is currently redundant as we
1196                          * only allow setting F_SEAL_SEAL at creation time, but
1197                          * it's cheap to check and decreases the effort required
1198                          * to allow additional seals.
1199                          */
1200                         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1201                             initial_seals != 0)
1202                                 error = EPERM;
1203                         else if ((flags & (O_CREAT | O_EXCL)) ==
1204                             (O_CREAT | O_EXCL))
1205                                 error = EEXIST;
1206                         else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1207                                 error = EINVAL;
1208                         else {
1209 #ifdef MAC
1210                                 error = mac_posixshm_check_open(td->td_ucred,
1211                                     shmfd, FFLAGS(flags & O_ACCMODE));
1212                                 if (error == 0)
1213 #endif
1214                                 error = shm_access(shmfd, td->td_ucred,
1215                                     FFLAGS(flags & O_ACCMODE));
1216                         }
1217
1218                         /*
1219                          * Truncate the file back to zero length if
1220                          * O_TRUNC was specified and the object was
1221                          * opened with read/write.
1222                          */
1223                         if (error == 0 &&
1224                             (flags & (O_ACCMODE | O_TRUNC)) ==
1225                             (O_RDWR | O_TRUNC)) {
1226                                 VM_OBJECT_WLOCK(shmfd->shm_object);
1227 #ifdef MAC
1228                                 error = mac_posixshm_check_truncate(
1229                                         td->td_ucred, fp->f_cred, shmfd);
1230                                 if (error == 0)
1231 #endif
1232                                         error = shm_dotruncate_locked(shmfd, 0,
1233                                             rl_cookie);
1234                                 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1235                         }
1236                         if (error == 0) {
1237                                 /*
1238                                  * Currently we only allow F_SEAL_SEAL to be
1239                                  * set initially.  As noted above, this would
1240                                  * need to be reworked should that change.
1241                                  */
1242                                 shmfd->shm_seals |= initial_seals;
1243                                 shm_hold(shmfd);
1244                         }
1245                         rangelock_unlock(&shmfd->shm_rl, rl_cookie,
1246                             &shmfd->shm_mtx);
1247                 }
1248                 sx_xunlock(&shm_dict_lock);
1249
1250                 if (error) {
1251                         fdclose(td, fp, fd);
1252                         fdrop(fp, td);
1253                         return (error);
1254                 }
1255         }
1256
1257         finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1258
1259         td->td_retval[0] = fd;
1260         fdrop(fp, td);
1261
1262         return (0);
1263 }
1264
1265 /* System calls. */
1266 #ifdef COMPAT_FREEBSD12
1267 int
1268 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1269 {
1270
1271         return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1272             uap->mode, NULL));
1273 }
1274 #endif
1275
1276 int
1277 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1278 {
1279         char *path;
1280         Fnv32_t fnv;
1281         int error;
1282
1283         error = shm_copyin_path(td, uap->path, &path);
1284         if (error != 0)
1285                 return (error);
1286
1287         AUDIT_ARG_UPATH1_CANON(path);
1288         fnv = fnv_32_str(path, FNV1_32_INIT);
1289         sx_xlock(&shm_dict_lock);
1290         error = shm_remove(path, fnv, td->td_ucred);
1291         sx_xunlock(&shm_dict_lock);
1292         free(path, M_SHMFD);
1293
1294         return (error);
1295 }
1296
1297 int
1298 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1299 {
1300         char *path_from = NULL, *path_to = NULL;
1301         Fnv32_t fnv_from, fnv_to;
1302         struct shmfd *fd_from;
1303         struct shmfd *fd_to;
1304         int error;
1305         int flags;
1306
1307         flags = uap->flags;
1308         AUDIT_ARG_FFLAGS(flags);
1309
1310         /*
1311          * Make sure the user passed only valid flags.
1312          * If you add a new flag, please add a new term here.
1313          */
1314         if ((flags & ~(
1315             SHM_RENAME_NOREPLACE |
1316             SHM_RENAME_EXCHANGE
1317             )) != 0) {
1318                 error = EINVAL;
1319                 goto out;
1320         }
1321
1322         /*
1323          * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1324          * force the user to choose one or the other.
1325          */
1326         if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1327             (flags & SHM_RENAME_EXCHANGE) != 0) {
1328                 error = EINVAL;
1329                 goto out;
1330         }
1331
1332         /* Renaming to or from anonymous makes no sense */
1333         if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1334                 error = EINVAL;
1335                 goto out;
1336         }
1337
1338         error = shm_copyin_path(td, uap->path_from, &path_from);
1339         if (error != 0)
1340                 goto out;
1341
1342         error = shm_copyin_path(td, uap->path_to, &path_to);
1343         if (error != 0)
1344                 goto out;
1345
1346         AUDIT_ARG_UPATH1_CANON(path_from);
1347         AUDIT_ARG_UPATH2_CANON(path_to);
1348
1349         /* Rename with from/to equal is a no-op */
1350         if (strcmp(path_from, path_to) == 0)
1351                 goto out;
1352
1353         fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1354         fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1355
1356         sx_xlock(&shm_dict_lock);
1357
1358         fd_from = shm_lookup(path_from, fnv_from);
1359         if (fd_from == NULL) {
1360                 error = ENOENT;
1361                 goto out_locked;
1362         }
1363
1364         fd_to = shm_lookup(path_to, fnv_to);
1365         if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1366                 error = EEXIST;
1367                 goto out_locked;
1368         }
1369
1370         /*
1371          * Unconditionally prevents shm_remove from invalidating the 'from'
1372          * shm's state.
1373          */
1374         shm_hold(fd_from);
1375         error = shm_remove(path_from, fnv_from, td->td_ucred);
1376
1377         /*
1378          * If we get ENOENT here, one of our assumptions failed (e.g. the
1379          * locking didn't protect us).
1380          */
1381         KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1382             path_from));
1383         if (error != 0) {
1384                 shm_drop(fd_from);
1385                 goto out_locked;
1386         }
1387
1388         /*
1389          * If we are exchanging, we need to ensure the shm_remove below
1390          * doesn't invalidate the dest shm's state.
1391          */
1392         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1393                 shm_hold(fd_to);
1394
1395         /*
1396          * NOTE: if path_to is not already in the hash, c'est la vie;
1397          * it simply means we have nothing already at path_to to unlink.
1398          * That is the ENOENT case.
1399          *
1400          * If we somehow don't have access to unlink this guy, but
1401          * did for the shm at path_from, then relink the shm to path_from
1402          * and abort with EACCES.
1403          *
1404          * All other errors: that is weird; let's relink and abort the
1405          * operation.
1406          */
1407         error = shm_remove(path_to, fnv_to, td->td_ucred);
1408         if (error != 0 && error != ENOENT) {
1409                 shm_insert(path_from, fnv_from, fd_from);
1410                 shm_drop(fd_from);
1411                 /* Don't free path_from now, since the hash references it */
1412                 path_from = NULL;
1413                 goto out_locked;
1414         }
1415
1416         error = 0;
1417
1418         shm_insert(path_to, fnv_to, fd_from);
1419
1420         /* Don't free path_to now, since the hash references it */
1421         path_to = NULL;
1422
1423         /* We kept a ref when we removed, and incremented again in insert */
1424         shm_drop(fd_from);
1425         KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1426             fd_from->shm_refs));
1427
1428         if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1429                 shm_insert(path_from, fnv_from, fd_to);
1430                 path_from = NULL;
1431                 shm_drop(fd_to);
1432                 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1433                     fd_to->shm_refs));
1434         }
1435
1436 out_locked:
1437         sx_xunlock(&shm_dict_lock);
1438
1439 out:
1440         free(path_from, M_SHMFD);
1441         free(path_to, M_SHMFD);
1442         return (error);
1443 }
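/*
 * Hypothetical userspace sketch of the flag combinations handled
 * above (paths arbitrary):
 *
 *	shm_rename("/old", "/new", 0);
 *	    (replaces "/new" if it exists)
 *	shm_rename("/old", "/new", SHM_RENAME_NOREPLACE);
 *	    (fails with EEXIST if "/new" exists)
 *	shm_rename("/old", "/new", SHM_RENAME_EXCHANGE);
 *	    (atomically swaps the objects at the two paths)
 */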
1444
1445 static int
1446 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1447     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1448     vm_ooffset_t foff, struct thread *td)
1449 {
1450         struct vmspace *vms;
1451         vm_map_entry_t next_entry, prev_entry;
1452         vm_offset_t align, mask, maxaddr;
1453         int docow, error, rv, try;
1454         bool curmap;
1455
1456         if (shmfd->shm_lp_psind == 0)
1457                 return (EINVAL);
1458
1459         /* MAP_PRIVATE is disabled */
1460         if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1461             MAP_NOCORE |
1462 #ifdef MAP_32BIT
1463             MAP_32BIT |
1464 #endif
1465             MAP_ALIGNMENT_MASK)) != 0)
1466                 return (EINVAL);
1467
1468         vms = td->td_proc->p_vmspace;
1469         curmap = map == &vms->vm_map;
1470         if (curmap) {
1471                 error = kern_mmap_racct_check(td, map, size);
1472                 if (error != 0)
1473                         return (error);
1474         }
1475
1476         docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1477         docow |= MAP_INHERIT_SHARE;
1478         if ((flags & MAP_NOCORE) != 0)
1479                 docow |= MAP_DISABLE_COREDUMP;
1480
1481         mask = pagesizes[shmfd->shm_lp_psind] - 1;
1482         if ((foff & mask) != 0)
1483                 return (EINVAL);
1484         maxaddr = vm_map_max(map);
1485 #ifdef MAP_32BIT
1486         if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1487                 maxaddr = MAP_32BIT_MAX_ADDR;
1488 #endif
1489         if (size == 0 || (size & mask) != 0 ||
1490             (*addr != 0 && ((*addr & mask) != 0 ||
1491             *addr + size < *addr || *addr + size > maxaddr)))
1492                 return (EINVAL);
1493
1494         align = flags & MAP_ALIGNMENT_MASK;
1495         if (align == 0) {
1496                 align = pagesizes[shmfd->shm_lp_psind];
1497         } else if (align == MAP_ALIGNED_SUPER) {
1498                 if (shmfd->shm_lp_psind != 1)
1499                         return (EINVAL);
1500                 align = pagesizes[1];
1501         } else {
1502                 align >>= MAP_ALIGNMENT_SHIFT;
1503                 align = 1ULL << align;
1504                 /* Also handles overflow. */
1505                 if (align < pagesizes[shmfd->shm_lp_psind])
1506                         return (EINVAL);
1507         }
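        /*
         * Example of the decoding above: a MAP_ALIGNED(21) request
         * yields align = 1 << 21 (2M), which is accepted as long as it
         * is not smaller than pagesizes[shm_lp_psind] for this object.
         */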
1508
1509         vm_map_lock(map);
1510         if ((flags & MAP_FIXED) == 0) {
1511                 try = 1;
1512                 if (curmap && (*addr == 0 ||
1513                     (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1514                     *addr < round_page((vm_offset_t)vms->vm_daddr +
1515                     lim_max(td, RLIMIT_DATA))))) {
1516                         *addr = roundup2((vm_offset_t)vms->vm_daddr +
1517                             lim_max(td, RLIMIT_DATA),
1518                             pagesizes[shmfd->shm_lp_psind]);
1519                 }
1520 again:
1521                 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1522                 if (rv != KERN_SUCCESS) {
1523                         if (try == 1) {
1524                                 try = 2;
1525                                 *addr = vm_map_min(map);
1526                                 if ((*addr & mask) != 0)
1527                                         *addr = (*addr + mask) & ~mask;
1528                                 goto again;
1529                         }
1530                         goto fail1;
1531                 }
1532         } else if ((flags & MAP_EXCL) == 0) {
1533                 rv = vm_map_delete(map, *addr, *addr + size);
1534                 if (rv != KERN_SUCCESS)
1535                         goto fail1;
1536         } else {
1537                 error = ENOSPC;
1538                 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1539                         goto fail;
1540                 next_entry = vm_map_entry_succ(prev_entry);
1541                 if (next_entry->start < *addr + size)
1542                         goto fail;
1543         }
1544
1545         rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1546             prot, max_prot, docow);
1547 fail1:
1548         error = vm_mmap_to_errno(rv);
1549 fail:
1550         vm_map_unlock(map);
1551         return (error);
1552 }
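
/*
 * Illustrative userland sketch (not part of the kernel build): creating
 * and mapping a largepage object serviced by shm_mmap_large() above.
 * This assumes the shm_create_largepage(3) wrapper and that psind 1
 * denotes a 2M superpage on the platform; error handling is abbreviated.
 *
 *        #include <sys/mman.h>
 *        #include <fcntl.h>
 *        #include <unistd.h>
 *        #include <err.h>
 *
 *        size_t lpsize = 2 * 1024 * 1024;
 *        int fd;
 *        void *p;
 *
 *        fd = shm_create_largepage("/lp_example", O_CREAT | O_RDWR, 1,
 *            SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
 *        if (fd < 0)
 *                err(1, "shm_create_largepage");
 *        if (ftruncate(fd, lpsize) != 0)
 *                err(1, "ftruncate");
 *        p = mmap(NULL, lpsize, PROT_READ | PROT_WRITE, MAP_SHARED,
 *            fd, 0);
 *        if (p == MAP_FAILED)
 *                err(1, "mmap");
 *
 * Both the offset and the size must be multiples of the configured
 * large page size, or shm_mmap_large() returns EINVAL.
 */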
1553
1554 static int
1555 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1556     vm_prot_t prot, vm_prot_t cap_maxprot, int flags,
1557     vm_ooffset_t foff, struct thread *td)
1558 {
1559         struct shmfd *shmfd;
1560         vm_prot_t maxprot;
1561         int error;
1562         bool writecnt;
1563         void *rl_cookie;
1564
1565         shmfd = fp->f_data;
1566         maxprot = VM_PROT_NONE;
1567
1568         rl_cookie = rangelock_rlock(&shmfd->shm_rl, 0, objsize,
1569             &shmfd->shm_mtx);
1570         /* FREAD should always be set. */
1571         if ((fp->f_flag & FREAD) != 0)
1572                 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1573
1574         /*
1575          * If FWRITE is set, we can allow VM_PROT_WRITE unless it is a
1576          * shared mapping with a write seal applied.  Private mappings
1577          * are always writable.
1578          */
1579         if ((flags & MAP_SHARED) == 0) {
1580                 cap_maxprot |= VM_PROT_WRITE;
1581                 maxprot |= VM_PROT_WRITE;
1582                 writecnt = false;
1583         } else {
1584                 if ((fp->f_flag & FWRITE) != 0 &&
1585                     (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1586                         maxprot |= VM_PROT_WRITE;
1587
1588                 /*
1589                  * Any mappings from a writable descriptor may be upgraded to
1590                  * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1591                  * applied between the open and subsequent mmap(2).  We want to
1592                  * reject application of a write seal as long as any such
1593                  * mapping exists so that the seal cannot be trivially bypassed.
1594                  */
1595                 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1596                 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1597                         error = EACCES;
1598                         goto out;
1599                 }
1600         }
1601         maxprot &= cap_maxprot;
1602
1603         /* See comment in vn_mmap(). */
1604         if (
1605 #ifdef _LP64
1606             objsize > OFF_MAX ||
1607 #endif
1608             foff > OFF_MAX - objsize) {
1609                 error = EINVAL;
1610                 goto out;
1611         }
1612
1613 #ifdef MAC
1614         error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1615         if (error != 0)
1616                 goto out;
1617 #endif
1618
1619         mtx_lock(&shm_timestamp_lock);
1620         vfs_timestamp(&shmfd->shm_atime);
1621         mtx_unlock(&shm_timestamp_lock);
1622         vm_object_reference(shmfd->shm_object);
1623
1624         if (shm_largepage(shmfd)) {
1625                 writecnt = false;
1626                 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1627                     maxprot, flags, foff, td);
1628         } else {
1629                 if (writecnt) {
1630                         vm_pager_update_writecount(shmfd->shm_object, 0,
1631                             objsize);
1632                 }
1633                 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1634                     shmfd->shm_object, foff, writecnt, td);
1635         }
1636         if (error != 0) {
1637                 if (writecnt)
1638                         vm_pager_release_writecount(shmfd->shm_object, 0,
1639                             objsize);
1640                 vm_object_deallocate(shmfd->shm_object);
1641         }
1642 out:
1643         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1644         return (error);
1645 }
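
/*
 * Illustrative userland sketch (not part of the kernel build) of the
 * write-seal handling above: once F_SEAL_WRITE is applied, a shared
 * writable mapping is refused with EACCES, while a private mapping
 * remains writable.  Error handling is abbreviated.
 *
 *        #include <sys/mman.h>
 *        #include <fcntl.h>
 *        #include <unistd.h>
 *
 *        int fd;
 *        void *p;
 *
 *        fd = memfd_create("sealed_example", MFD_ALLOW_SEALING);
 *        ftruncate(fd, 4096);
 *        fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);
 *        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *            fd, 0);
 *
 * Here p is MAP_FAILED with errno set to EACCES; the same request with
 * MAP_PRIVATE would succeed.
 */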
1646
1647 static int
1648 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1649     struct thread *td)
1650 {
1651         struct shmfd *shmfd;
1652         int error;
1653
1654         error = 0;
1655         shmfd = fp->f_data;
1656         mtx_lock(&shm_timestamp_lock);
1657         /*
1658          * SUSv4 says that x bits of permission need not be affected.
1659          * Be consistent with our shm_open().
1660          */
1661 #ifdef MAC
1662         error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1663         if (error != 0)
1664                 goto out;
1665 #endif
1666         error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1667             VADMIN, active_cred);
1668         if (error != 0)
1669                 goto out;
1670         shmfd->shm_mode = mode & ACCESSPERMS;
1671 out:
1672         mtx_unlock(&shm_timestamp_lock);
1673         return (error);
1674 }
1675
1676 static int
1677 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1678     struct thread *td)
1679 {
1680         struct shmfd *shmfd;
1681         int error;
1682
1683         error = 0;
1684         shmfd = fp->f_data;
1685         mtx_lock(&shm_timestamp_lock);
1686 #ifdef MAC
1687         error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1688         if (error != 0)
1689                 goto out;
1690 #endif
1691         if (uid == (uid_t)-1)
1692                 uid = shmfd->shm_uid;
1693         if (gid == (gid_t)-1)
1694                 gid = shmfd->shm_gid;
1695         if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1696             (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1697             (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1698                 goto out;
1699         shmfd->shm_uid = uid;
1700         shmfd->shm_gid = gid;
1701 out:
1702         mtx_unlock(&shm_timestamp_lock);
1703         return (error);
1704 }
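
/*
 * Illustrative userland sketch (not part of the kernel build): the
 * methods above back fchmod(2) and fchown(2) on shm descriptors.
 * Note that shm_chmod() masks the new mode with ACCESSPERMS.  The
 * group id below is a hypothetical placeholder.
 *
 *        #include <sys/mman.h>
 *        #include <sys/stat.h>
 *        #include <fcntl.h>
 *        #include <unistd.h>
 *
 *        gid_t shared_gid = 920;
 *        int fd;
 *
 *        fd = shm_open("/perm_example", O_RDWR | O_CREAT, 0600);
 *        fchmod(fd, 0640);
 *        fchown(fd, (uid_t)-1, shared_gid);
 */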
1705
1706 /*
1707  * Helper routines to allow the backing object of a shared memory file
1708  * descriptor to be mapped in the kernel.
1709  */
1710 int
1711 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1712 {
1713         struct shmfd *shmfd;
1714         vm_offset_t kva, ofs;
1715         vm_object_t obj;
1716         int rv;
1717
1718         if (fp->f_type != DTYPE_SHM)
1719                 return (EINVAL);
1720         shmfd = fp->f_data;
1721         obj = shmfd->shm_object;
1722         VM_OBJECT_WLOCK(obj);
1723         /*
1724          * XXXRW: This validation is probably insufficient, and subject to
1725          * sign errors.  It should be fixed.
1726          */
1727         if (offset >= shmfd->shm_size ||
1728             offset + size > round_page(shmfd->shm_size)) {
1729                 VM_OBJECT_WUNLOCK(obj);
1730                 return (EINVAL);
1731         }
1732
1733         shmfd->shm_kmappings++;
1734         vm_object_reference_locked(obj);
1735         VM_OBJECT_WUNLOCK(obj);
1736
1737         /* Map the object into the kernel_map and wire it. */
1738         kva = vm_map_min(kernel_map);
1739         ofs = offset & PAGE_MASK;
1740         offset = trunc_page(offset);
1741         size = round_page(size + ofs);
1742         rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1743             VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1744             VM_PROT_READ | VM_PROT_WRITE, 0);
1745         if (rv == KERN_SUCCESS) {
1746                 rv = vm_map_wire(kernel_map, kva, kva + size,
1747                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1748                 if (rv == KERN_SUCCESS) {
1749                         *memp = (void *)(kva + ofs);
1750                         return (0);
1751                 }
1752                 vm_map_remove(kernel_map, kva, kva + size);
1753         } else
1754                 vm_object_deallocate(obj);
1755
1756         /* On failure, drop our mapping reference. */
1757         VM_OBJECT_WLOCK(obj);
1758         shmfd->shm_kmappings--;
1759         VM_OBJECT_WUNLOCK(obj);
1760
1761         return (vm_mmap_to_errno(rv));
1762 }
1763
1764 /*
1765  * We require the caller to unmap the entire entry.  This allows us to
1766  * safely decrement shm_kmappings when a mapping is removed.
1767  */
1768 int
1769 shm_unmap(struct file *fp, void *mem, size_t size)
1770 {
1771         struct shmfd *shmfd;
1772         vm_map_entry_t entry;
1773         vm_offset_t kva, ofs;
1774         vm_object_t obj;
1775         vm_pindex_t pindex;
1776         vm_prot_t prot;
1777         boolean_t wired;
1778         vm_map_t map;
1779         int rv;
1780
1781         if (fp->f_type != DTYPE_SHM)
1782                 return (EINVAL);
1783         shmfd = fp->f_data;
1784         kva = (vm_offset_t)mem;
1785         ofs = kva & PAGE_MASK;
1786         kva = trunc_page(kva);
1787         size = round_page(size + ofs);
1788         map = kernel_map;
1789         rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1790             &obj, &pindex, &prot, &wired);
1791         if (rv != KERN_SUCCESS)
1792                 return (EINVAL);
1793         if (entry->start != kva || entry->end != kva + size) {
1794                 vm_map_lookup_done(map, entry);
1795                 return (EINVAL);
1796         }
1797         vm_map_lookup_done(map, entry);
1798         if (obj != shmfd->shm_object)
1799                 return (EINVAL);
1800         vm_map_remove(map, kva, kva + size);
1801         VM_OBJECT_WLOCK(obj);
1802         KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1803         shmfd->shm_kmappings--;
1804         VM_OBJECT_WUNLOCK(obj);
1805         return (0);
1806 }
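
/*
 * Illustrative in-kernel sketch of a hypothetical consumer of the
 * helpers above: "fp" is a held DTYPE_SHM file and "len" a byte count
 * within the object.  The entire window mapped by shm_map() must be
 * passed back to shm_unmap().
 *
 *        void *mem;
 *        int error;
 *
 *        error = shm_map(fp, len, 0, &mem);
 *        if (error != 0)
 *                return (error);
 *        ... access len bytes starting at mem ...
 *        error = shm_unmap(fp, mem, len);
 */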
1807
1808 static int
1809 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1810 {
1811         const char *path, *pr_path;
1812         size_t pr_pathlen;
1813         bool visible;
1814
1815         sx_assert(&shm_dict_lock, SA_LOCKED);
1816         kif->kf_type = KF_TYPE_SHM;
1817         kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1818         kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1819         if (shmfd->shm_path != NULL) {
1820                 path = shmfd->shm_path;
1821                 pr_path = curthread->td_ucred->cr_prison->pr_path;
1822                 if (strcmp(pr_path, "/") != 0) {
1823                         /* Return the jail-rooted pathname. */
1824                         pr_pathlen = strlen(pr_path);
1825                         visible = strncmp(path, pr_path, pr_pathlen)
1826                             == 0 && path[pr_pathlen] == '/';
1827                         if (list && !visible)
1828                                 return (EPERM);
1829                         if (visible)
1830                                 path += pr_pathlen;
1831                 }
1832                 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1833         }
1836         return (0);
1837 }
1838
1839 static int
1840 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1841     struct filedesc *fdp __unused)
1842 {
1843         int res;
1844
1845         sx_slock(&shm_dict_lock);
1846         res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1847         sx_sunlock(&shm_dict_lock);
1848         return (res);
1849 }
1850
1851 static int
1852 shm_add_seals(struct file *fp, int seals)
1853 {
1854         struct shmfd *shmfd;
1855         void *rl_cookie;
1856         vm_ooffset_t writemappings;
1857         int error, nseals;
1858
1859         error = 0;
1860         shmfd = fp->f_data;
1861         rl_cookie = rangelock_wlock(&shmfd->shm_rl, 0, OFF_MAX,
1862             &shmfd->shm_mtx);
1863
1864         /* Even already-set seals should result in EPERM. */
1865         if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1866                 error = EPERM;
1867                 goto out;
1868         }
1869         nseals = seals & ~shmfd->shm_seals;
1870         if ((nseals & F_SEAL_WRITE) != 0) {
1871                 if (shm_largepage(shmfd)) {
1872                         error = ENOTSUP;
1873                         goto out;
1874                 }
1875
1876                 /*
1877                  * The rangelock above prevents writable mappings from being
1878                  * added after we've started applying seals.  The RLOCK here
1879                  * is to avoid torn reads on ILP32 arches as unmapping/reducing
1880                  * writemappings will be done without a rangelock.
1881                  */
1882                 VM_OBJECT_RLOCK(shmfd->shm_object);
1883                 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1884                 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1885                 /* kmappings are also writable */
1886                 if (writemappings > 0) {
1887                         error = EBUSY;
1888                         goto out;
1889                 }
1890         }
1891         shmfd->shm_seals |= nseals;
1892 out:
1893         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1894         return (error);
1895 }
1896
1897 static int
1898 shm_get_seals(struct file *fp, int *seals)
1899 {
1900         struct shmfd *shmfd;
1901
1902         shmfd = fp->f_data;
1903         *seals = shmfd->shm_seals;
1904         return (0);
1905 }
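
/*
 * Illustrative userland sketch (not part of the kernel build) of the
 * fcntl(2) sealing interface served by shm_add_seals() and
 * shm_get_seals() above.  Sealing must be enabled at creation time;
 * error handling is abbreviated.
 *
 *        #include <sys/mman.h>
 *        #include <fcntl.h>
 *        #include <unistd.h>
 *
 *        int fd, seals;
 *
 *        fd = memfd_create("seal_example", MFD_ALLOW_SEALING);
 *        ftruncate(fd, 4096);
 *        fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *        seals = fcntl(fd, F_GET_SEALS);
 *
 * Applying F_SEAL_SEAL as well would make any later F_ADD_SEALS fail
 * with EPERM, as enforced above.
 */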
1906
1907 static int
1908 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
1909 {
1910         void *rl_cookie;
1911         struct shmfd *shmfd;
1912         size_t size;
1913         int error;
1914
1915         /* This assumes that the caller already checked for overflow. */
1916         error = 0;
1917         shmfd = fp->f_data;
1918         size = offset + len;
1919
1920         /*
1921          * Just grab the rangelock for the range that we may be attempting to
1922          * grow, rather than blocking read/write for regions we won't be
1923          * touching while this (potential) resize is in progress.  Other
1924          * attempts to resize the shmfd must take a write lock from 0 to
1925          * OFF_MAX, so even if the locked range lies beyond the current end
1926          * of the shmfd, that is not a concern.  If other mechanisms to grow
1927          * a shmfd are added, this may need to be re-evaluated.
1928          */
1929         rl_cookie = rangelock_wlock(&shmfd->shm_rl, offset, size,
1930             &shmfd->shm_mtx);
1931         if (size > shmfd->shm_size)
1932                 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
1933         rangelock_unlock(&shmfd->shm_rl, rl_cookie, &shmfd->shm_mtx);
1934         /* Translate to posix_fallocate(2) return value as needed. */
1935         if (error == ENOMEM)
1936                 error = ENOSPC;
1937         return (error);
1938 }
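
/*
 * Illustrative userland sketch (not part of the kernel build):
 * posix_fallocate(2) on a shm descriptor reaches shm_fallocate() and
 * grows the object as needed.  Note that posix_fallocate() returns an
 * error number rather than setting errno.
 *
 *        #include <sys/mman.h>
 *        #include <fcntl.h>
 *
 *        int error, fd;
 *
 *        fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *        error = posix_fallocate(fd, 0, 1024 * 1024);
 *
 * A failed swap reservation surfaces as ENOSPC, per the ENOMEM
 * translation above.
 */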
1939
1940 static int
1941 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
1942 {
1943         struct shm_mapping *shmm;
1944         struct sbuf sb;
1945         struct kinfo_file kif;
1946         u_long i;
1947         ssize_t curlen;
1948         int error, error2;
1949
1950         sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
1951         sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
1952         curlen = 0;
1953         error = 0;
1954         sx_slock(&shm_dict_lock);
1955         for (i = 0; i < shm_hash + 1; i++) {
1956                 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
1957                         error = shm_fill_kinfo_locked(shmm->sm_shmfd,
1958                             &kif, true);
1959                         if (error == EPERM) {
1960                                 error = 0;
1961                                 continue;
1962                         }
1963                         if (error != 0)
1964                                 break;
1965                         pack_kinfo(&kif);
1966                         error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
1967                             0 : ENOMEM;
1968                         if (error != 0)
1969                                 break;
1970                         curlen += kif.kf_structsize;
1971                 }
1972         }
1973         sx_sunlock(&shm_dict_lock);
1974         error2 = sbuf_finish(&sb);
1975         sbuf_delete(&sb);
1976         return (error != 0 ? error : error2);
1977 }
1978
1979 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
1980     CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
1981     NULL, 0, sysctl_posix_shm_list, "",
1982     "POSIX SHM list");
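
/*
 * Illustrative userland sketch (not part of the kernel build): reading
 * the packed struct kinfo_file records exported by the sysctl above,
 * as posixshmcontrol(1) does.  Buffer sizing and error handling are
 * simplified.
 *
 *        #include <sys/types.h>
 *        #include <sys/sysctl.h>
 *        #include <sys/user.h>
 *        #include <stdlib.h>
 *
 *        size_t len = 0;
 *        char *buf;
 *
 *        sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0);
 *        buf = malloc(len);
 *        sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0);
 *
 * Records are walked by advancing kf_structsize bytes at a time, as
 * they are packed by pack_kinfo() before export.
 */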
1983
1984 int
1985 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
1986     struct filecaps *caps)
1987 {
1988
1989         return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
1990 }
1991
1992 /*
1993  * This version of the shm_open() interface leaves CLOEXEC behavior up to the
1994  * caller, and libc will enforce it for the traditional shm_open() call.  This
1995  * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
1996  * interface also includes a 'name' argument that is currently unused, but
1997  * may later be exported via some interface for debugging purposes.  From
1998  * the kernel's perspective, it is optional.  Individual consumers like
1999  * memfd_create() may require it in order to be compatible with other systems
2000  * implementing the same function.
2001  */
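/*
 * Illustrative userland sketch (not part of the kernel build) of the
 * CLOEXEC point above: memfd_create() passes the caller's CLOEXEC
 * choice and debugging name through to shm_open2(), while libc's
 * shm_open() always applies FD_CLOEXEC itself.
 *
 *        #include <sys/mman.h>
 *        #include <unistd.h>
 *
 *        int fd;
 *
 *        fd = memfd_create("scratch_example", MFD_CLOEXEC);
 *        ftruncate(fd, 4096);
 */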
2002 int
2003 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2004 {
2005
2006         return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2007             uap->shmflags, NULL, uap->name));
2008 }